source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
Utilities.h | #ifndef __UTILITIES_H__
#define __UTILITIES_H__
#include <fstream>
#include <iostream>
#include <algorithm>
#include <vector>
#include <forward_list>
#include <chrono>
#include <cstdint>
#include <memory>
#include <unordered_set>
#include <map>
#include <unordered_map>
//#include <boost/multiprecision/cpp_int.hpp>
#include <cmath>
#include <string>
#include <cstring>
#include <sys/stat.h>
#include <boost/bimap/bimap.hpp> //Bidirectional map
// NOTE(review): 'using namespace' at header scope leaks into every includer;
// kept for source compatibility, but the duplicated second
// 'using namespace std;' line has been removed (it was repeated verbatim).
using namespace std;
using namespace std::chrono;
//Type definitions for the whole project
typedef uint16_t size_seq; //length of one sequence, max 2^16=65.536
typedef uint64_t size_seq_tot; //total length over all sequences, max 2^64=1,844674407×10¹⁹
typedef uint32_t id_seq_type; //identifier of a single sequence/read
typedef uint16_t id_specie_type; //identifier of a species
typedef uint32_t id_grp_type; //identifier of a group
typedef uint16_t id_cluster_type; //identifier of a cluster
//typedef uint128_t hash_type; //max 2^128=4^64=3,402823669×10³⁸
typedef uint64_t hash_type; //max 2^64=1,844674407×10¹⁹
//hash value + validity flag: second == false when the hashed window
//contained a character that could not be converted (non-ACGT)
typedef pair<hash_type, bool> HashCorrect;
typedef uint16_t lMer_type;
//For sequences
enum SeedState {No_State_Seed, No_Seed, Seed, No_Other_Read};
typedef unordered_map<id_seq_type, size_seq> MapAdjSeqID;
enum TypeGraph {Paired = 0, Single, SingleUnion};
typedef pair<string, string> SequenceHeader; //presumably (header, sequence) or paired headers — TODO confirm
typedef map<id_seq_type, SequenceHeader> MapIDFile_Header;
//////////////////////////////////////////////////////////////////////////////////////////////////////
//Required to compile the Assessment.h and Assessment.cpp code
typedef map<id_specie_type, id_seq_type> Map_Specie__NumRead;
typedef map<id_grp_type, id_seq_type> Map_Grp__Size;
typedef map<id_grp_type, Map_Specie__NumRead> Map_Grp__Map_Specie__IDSeq;
typedef map<id_grp_type, Map_Specie__NumRead::value_type> Map_Grp__Max_Pair_Specie__IDSeq;
typedef map<id_cluster_type, id_seq_type> Map_Cluster__Size;
typedef map<id_cluster_type, Map_Specie__NumRead> Map_Cluster__Map_Specie__IDSeq;
typedef map<id_cluster_type, Map_Specie__NumRead::value_type> Map_Cluster__Max_Pair_Specie__IDSeq;
typedef map<id_specie_type, unordered_set<id_seq_type>> Map_Specie__SetIdSeq;
//////////////////////////////////////////////////////////////////////////////////////////////////////
//A single l-mer together with its occurrence statistics.
struct Lmer
{
	typedef shared_ptr<Lmer> Ptr;
	string l_mer; //the l-mer string itself
	size_seq count = 1; //1 = does not contain its own reverse complement, 2 = contains the reverse complement
	double prob = 0; //probability of this l-mer — computed elsewhere, TODO confirm exact semantics
};
//Compressed collection of l-mers, addressable both by vector index and by
//l-mer hash. Member definitions live in the corresponding .cpp file.
struct LMerVectorCompress
{
	typedef shared_ptr<LMerVectorCompress> Ptr;
	typedef map<lMer_type, lMer_type> Map_HashLMer_IndexVector; //l-mer hash -> index into vLmer (inferred from GetIndexWithHash)
	LMerVectorCompress(size_seq L); //L = length of the l-mers stored
	const Lmer::Ptr& GetWithIndex(lMer_type index); //access by position in vLmer
	const Lmer::Ptr& GetWithHash(lMer_type hash); //access by l-mer hash
	lMer_type GetIndexWithHash(lMer_type hash); //translate a hash into its vector index
	size_seq getL() const; //l-mer length
	const Map_HashLMer_IndexVector& getMapHash() const;
	const vector<Lmer::Ptr>& getLmer() const;
private:
	size_seq L; //l-mer length
	Map_HashLMer_IndexVector mapHash; //hash -> vector index
	vector<Lmer::Ptr> vLmer; //the stored l-mers
};
//////////////////////////////////////////////////////////////////////////////////////////////////////
//Spaced q-mer: a pattern where '1' marks a position that contributes to the
//hash and any other character is a don't-care. Precomputes the tables used
//by the fast hashing functions below (runs of 1s, minimal-change shifts).
class Spaced_Qmer {
public:
	//For unit (a "unit" appears to be a run of consecutive 1s in the
	//pattern — see its use as a contiguous window length in
	//GetHashes_speedup_unit; TODO confirm)
	struct Position_NOneBefore {
		size_t pos_start = 0; //start position of the unit inside the pattern
		size_t n_one_before = 0;//number of 1s that precede the first 1 of this unit
	};
	struct NOne_Position_NOneBefore {
		size_t n_one = 0; //size of the unit (number of 1s)
		vector<Position_NOneBefore> v_pos_bef; //all occurrences of units of this size
	};
	typedef vector<NOne_Position_NOneBefore> V_Unit;
	//for shift
	typedef vector<size_t> Position;
	struct PreviusShiftMinChange {
		vector<Position> one_to_change; //one_to_change[i] -> i = shift index; the content holds, as indices into pos_one, which 1s must be recomputed
		Position one_exit; //number of 1s shifted out, per shift
		Position pos_min; //best previous shift to compute the hash from
	};
	Spaced_Qmer();
	Spaced_Qmer(string spaced_qmer);
	//Number of '1' positions (the weight of the spaced qmer)
	inline size_t GetWeight() const {
		return this->pos_one.size();
	}
	//Total pattern length
	inline size_t GetQ() const {
		return this->spaced_q.length();
	}
	//True if the pattern has a '1' at the given index
	inline bool isOne(size_t index) const {
		return this->spaced_q[index] == '1';
	}
	//Indices of all '1' characters in the pattern
	inline const Position& GetPosOne() const {
		return this->pos_one;
	}
	inline const V_Unit& GetUnitV() const {
		return this->unit_map;
	}
	inline const PreviusShiftMinChange& GetShiftMinChange() const {
		return this->shift_min_change;
	}
	inline const string& toString() const {
		return this->spaced_q;
	}
	//Reinitialize with a new pattern and rebuild the precomputed tables
	void reset(string spaced_qmer);
private:
	string spaced_q; //the pattern itself
	Position pos_one; //positions of the 1s
	V_Unit unit_map; //precomputed units
	PreviusShiftMinChange shift_min_change; //precomputed shift tables
	void SaveIndexOne();
	void GetUnitV(V_Unit& v_unit);
	void GetShiftMax(PreviusShiftMinChange& shift_max);
};
//////////////////////////////////////////////////////////////////////////////////////////////////////
// Maps a nucleotide character to its 2-bit code (A=0, C=1, G=2, T=3).
// Any other character yields the sentinel 4, which callers treat as a
// conversion error.
inline static hash_type CharToInt(char ch)
{
	switch(ch)
	{
		case 'A': return 0;
		case 'C': return 1;
		case 'G': return 2;
		case 'T': return 3;
		default:  return 4; //ERROR CODE
	}
}
// Maps a nucleotide character to the 2-bit code of its complement
// (A->T=3, C->G=2, G->C=1, T->A=0). Any other character yields the
// sentinel 4, which callers treat as a conversion error.
inline static hash_type CharToIntComplement(char ch)
{
	switch(ch)
	{
		case 'A': return 3;
		case 'C': return 2;
		case 'G': return 1;
		case 'T': return 0;
		default:  return 4; //ERROR CODE
	}
}
//Hash per tutti 1 su spaced qmer
inline static void GetHash(const string& s_Str, size_t startQmer, size_t length,
HashCorrect& hash, hash_type (*fConvertion)(char))
{
hash.first = 0;
hash.second = true;
// #pragma omp parallel for ordered
for(size_t i = startQmer; i < startQmer + length; ++i)
{
hash_type ch = (*fConvertion)(s_Str[i]);
// #pragma omp ordered
if(hash.second)
{
if(ch == 4) //Errore conversione
hash.second = false;
if(hash.second)
hash.first |= ch << ((i - startQmer) * 2);//OR possibile perchè sommo potenze di 4, OR su posizioni diverse, non c'è riporto
}
}
if(!hash.second)
hash.first = 0;
}
//Hash per spaced qmer con *
inline static void GetHash(const string& s_Str, size_t startQmer, const Spaced_Qmer& spaced_qmer,
HashCorrect& hash, hash_type (*fConvertion)(char))
{
hash.first = 0;
hash.second = true;
const Spaced_Qmer::Position& pos_one = spaced_qmer.GetPosOne();
for(size_t j = 0; j < pos_one.size(); ++j)
{
hash_type ch = (*fConvertion)(s_Str[startQmer+pos_one[j]]);
if(hash.second)
{
if(ch == 4) //Errore conversione
hash.second = false;
if(hash.second)
hash.first |= ch << (j * 2);//OR possibile perchè sommo potenze di 4, OR su posizioni diverse, non c'è riporto
}
}
if(!hash.second)
hash.first = 0;
}
//Fast rolling hash for a spaced qmer made of all 1s (contiguous windows).
//For every length-'length' window of s_Str the 2-bit-packed hash is
//computed, reusing the previous window's hash when it is valid: remove the
//outgoing first character, shift, append the incoming last character.
inline static void GetHashes_speedup_previous(const string& s_Str, size_t length,
	vector<HashCorrect>& vHash, hash_type (*fConvertion)(char))
{
	if(s_Str.size() >= length)
	{
		size_t n_hashes = s_Str.size() - length + 1;
		vHash.resize(n_hashes, HashCorrect(0, true)); //create output vector
		vector<size_t> err(n_hashes, 0); //per-window count of unconvertible characters
		//First pass: sliding count of conversion errors per window, so we can
		//decide below which hashes need to be computed at all.
		// #pragma omp parallel for ordered
		for(size_t pos=0; pos < s_Str.size(); ++pos)
		{
			bool newErr = (*fConvertion)(s_Str[pos]) == 4; //character entering the window
			// #pragma omp ordered
			if(pos < length)
			{
				if(newErr)
					++err[0];
			}
			else
			{
				size_t actual = pos-length+1;
				size_t prev = pos-length;
				bool exitErr = (*fConvertion)(s_Str[prev]) == 4; //character leaving the window
				err[actual] = err[prev];
				if(exitErr)
					--err[actual];
				if(newErr)
					++err[actual];
			}
		}
		//Mark windows containing at least one invalid character as not-to-compute
		#pragma omp parallel for
		for(size_t pos=0; pos < vHash.size(); ++pos)
		{
			if(err[pos] > 0)
			{
				vHash[pos].first = 0;
				vHash[pos].second = false;
			}
		}
		if(!vHash.empty())
		{
			GetHash(s_Str, 0, length, vHash[0], fConvertion);//first window computed from scratch
			for(size_t pos=1; pos < vHash.size(); ++pos)
			{
				if(vHash[pos].second) //only if this window is error free
				{
					if(!vHash[pos-1].second) //previous hash unusable: recompute from scratch
						GetHash(s_Str, pos, length, vHash[pos], fConvertion);
					else
					{
						vHash[pos].first = vHash[pos - 1].first;
						vHash[pos].first -= (*fConvertion)(s_Str[pos - 1]); //remove the outgoing first character (its value sits in the low 2 bits)
						vHash[pos].first >>= 2; //divide by 4: drop its 2-bit slot
						vHash[pos].first |= ((*fConvertion)(s_Str[pos + length - 1]) << ((length - 1) * 2)); //append the incoming last character; OR is safe because the top slot is zero after the shift
					}
				}
			}
		}
	}
}
inline static void GetHashes_naive(const string& s_Str, const Spaced_Qmer& spaced_qmer,
vector<HashCorrect>& vHash, hash_type (*fConvertion)(char))
{
bool isAllOne = spaced_qmer.GetWeight() == spaced_qmer.GetQ();
if(isAllOne)
GetHashes_speedup_previous(s_Str, spaced_qmer.GetQ(), vHash, fConvertion);
else
{
if(s_Str.size() >= spaced_qmer.GetQ())
{
size_t n_hashes = s_Str.size() - spaced_qmer.GetQ() + 1;
vHash.resize(n_hashes, HashCorrect(0, true)); //Crea vettore
#pragma omp parallel for
for(size_t pos=0; pos < vHash.size(); ++pos)
GetHash(s_Str, pos, spaced_qmer, vHash[pos], fConvertion);
}
}
}
//Hashing by units: the spaced qmer is decomposed into runs of 1s ("units");
//the contiguous rolling-hash routine computes the hashes of every unit in
//parallel, and each window hash is then assembled by ORing the unit hashes,
//shifted to the unit's 2-bit offset inside the packed hash.
inline static void GetHashes_speedup_unit(const string& s_Str, const Spaced_Qmer& spaced_qmer, vector<HashCorrect>& vHash, hash_type (*fConvertion)(char)) {
	bool isAllOne = spaced_qmer.GetWeight() == spaced_qmer.GetQ();
	if(isAllOne) //no don't-care positions: plain rolling hash is enough
		GetHashes_speedup_previous(s_Str, spaced_qmer.GetQ(), vHash, fConvertion);
	else
	{
		if(s_Str.size() >= spaced_qmer.GetQ())
		{
			const Spaced_Qmer::V_Unit& spaced_v = spaced_qmer.GetUnitV();
			//Get hash v for all unit present
			//TODO: could be improved: since they are ordered, the smaller
			//unit hashes could be used to compose the larger ones
			vector<vector<HashCorrect>> hash_v(spaced_v.size());
			#pragma omp parallel for
			for(size_t i = 0; i < spaced_v.size(); ++i)//parallel computation, one hash vector per unit size
				GetHashes_speedup_previous(s_Str, spaced_v[i].n_one, hash_v[i], fConvertion);
			//Combine different hash
			size_t n_hashes = s_Str.size() - spaced_qmer.GetQ() + 1;
			vHash.resize(n_hashes, HashCorrect(0, true)); //create output vector
			#pragma omp parallel for
			for(size_t i = 0; i < vHash.size(); ++i)
			{
				for(size_t j = 0; j < spaced_v.size(); ++j)
				{
					vector<HashCorrect>& hash_unit = hash_v[j];
					for(size_t h = 0; h < spaced_v[j].v_pos_bef.size(); ++h)
					{
						size_t pos_unit = spaced_v[j].v_pos_bef[h].pos_start; //unit offset inside the pattern
						size_t shift = spaced_v[j].v_pos_bef[h].n_one_before*2; //2-bit slot offset inside the packed hash
						HashCorrect& hash = hash_unit[i+pos_unit];
						if(vHash[i].second && hash.second)
							vHash[i].first |= (hash.first << shift); //merge this unit's hash into the window hash
						else
						{
							//any invalid unit invalidates the whole window
							vHash[i].first = 0;
							vHash[i].second = false;
						}
					}
				}
			}
		}
	}
}
//Incremental hashing for spaced qmers: each window hash is derived from the
//best previous window (precomputed in Spaced_Qmer::GetShiftMinChange) by
//shifting out the 1s that leave the pattern and recomputing only the 1s
//that change, falling back to a from-scratch GetHash when the source hash
//is invalid.
inline static void GetHashes_speedup_previous(const string& s_Str, const Spaced_Qmer& spaced_qmer, vector<HashCorrect>& vHash, hash_type (*fConvertion)(char)) {
	bool isAllOne = spaced_qmer.GetWeight() == spaced_qmer.GetQ();
	if(isAllOne) //no don't-care positions: plain rolling hash is enough
		GetHashes_speedup_previous(s_Str, spaced_qmer.GetQ(), vHash, fConvertion);
	else
	{
		//Computes vHash[i] from vHash[i-index_prev] (the best previous shift).
		//vErr[x] holds, for window x, the pos_one indices that failed conversion.
		auto get_hash = [](const string& s_Str, const Spaced_Qmer& spaced_qmer, vector<HashCorrect>& vHash,
				vector<Spaced_Qmer::Position>& vErr,
				const Spaced_Qmer::PreviusShiftMinChange& shift, size_t index_prev,
				const Spaced_Qmer::Position& pos_one, size_t i, hash_type (*fConvertion)(char)){
			size_t shift_of_one = shift.one_exit[index_prev]; //how many 1s drop out with this shift
			const Spaced_Qmer::Position one_to_change = shift.one_to_change[index_prev];//retrieve which 1s must be recomputed; NOTE(review): this copies the vector — a const& would avoid it
			size_t pos_hash_get = i-index_prev;//the source hash position is the current one minus the index of the minimal-change shift
			if(vHash[pos_hash_get].second)//if the source hash is valid, update incrementally
			{
				//copy hash
				vHash[i] = vHash[pos_hash_get]; //copy hash
				vHash[i].first >>= 2*shift_of_one;//align by dropping the slots of the exiting 1s
				//Check whether the current hash is still valid: carry over the
				//error positions that do not shift out of the window
				Spaced_Qmer::Position& err = vErr[pos_hash_get];
				for(size_t e = 0; e < err.size(); ++e)
					if(err[e]>=shift_of_one)//this 1 does not exit
						vErr[i].push_back(err[e]-shift_of_one);
				vHash[i].second = vErr[i].empty();
				//reset the slots of the 1s that must be recomputed
				hash_type reset_one = 0;
				for(size_t j = 0; j < one_to_change.size(); ++j)
					reset_one |= (hash_type)3 << (one_to_change[j] * 2);
				vHash[i].first &= ~reset_one;
				//recompute the remaining positions of the hash
				for(size_t j = 0; j < one_to_change.size(); ++j)
				{
					size_t index_char = i+pos_one[one_to_change[j]];
					hash_type ch = (*fConvertion)(s_Str[index_char]);
					if(ch == 4) //conversion error
					{
						vHash[i].second = false;
						vErr[i].push_back(one_to_change[j]);
					}
					if(vHash[i].second)
						vHash[i].first |= ch << (one_to_change[j] * 2);//OR is safe: disjoint 2-bit slots, no carry
				}
			}
			else //source hash invalid: compute from scratch
				GetHash(s_Str, i, spaced_qmer, vHash[i], fConvertion);
		};
		if(s_Str.size() >= spaced_qmer.GetQ())
		{
			const Spaced_Qmer::PreviusShiftMinChange& shift = spaced_qmer.GetShiftMinChange();
			const Spaced_Qmer::Position& pos_one = spaced_qmer.GetPosOne();
			//Compute hash
			size_t n_hashes = s_Str.size() - spaced_qmer.GetQ() + 1;
			vHash.resize(n_hashes, HashCorrect(0, true)); //create output vector
			vector<Spaced_Qmer::Position> vErr(n_hashes); //per-window error positions
			if(!vHash.empty())
			{
				GetHash(s_Str, 0, spaced_qmer, vHash[0], fConvertion);//first window computed separately
				for(size_t i = 1; i < shift.pos_min.size() && i < vHash.size(); ++i)//for all positions covered by the precomputed shifts at the start of the sequence
					get_hash(s_Str, spaced_qmer, vHash, vErr, shift, shift.pos_min[i], pos_one, i, fConvertion);
				for(size_t i = shift.pos_min.size(); i < vHash.size(); ++i)//steady state: always use the last (best) shift
					get_hash(s_Str, spaced_qmer, vHash, vErr, shift, shift.pos_min.back(), pos_one, i, fConvertion);
			}
		}
	}
}
//////////////////////////////////////////////////////////////////////////////////////////////////////
//Decodes 'index' into the K-character k-mer it encodes — definition in the .cpp; TODO confirm encoding matches CharToInt
void GetKmer(hash_type index, size_seq K, string& Kmer);
//Enumerates all 4^K k-mers of length K — definition in the .cpp
vector<string> GetAllKmers(size_seq K);
//Creates 'path' and its parent directories — definition in the .cpp
void createDirAndSubDir(string path);
//Parses one /proc/self/status line for a memory figure — definition in the .cpp
int parseLineForMemory(char* line);
//Process memory usage queries (values presumably in kB — TODO confirm units)
int getVirtualMemoryUsed();
int getPeakVirtualMemoryUsed();
int getPhysicalMemoryUsed();
#endif
|
GB_unop__identity_int32_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int32_fc64)
// op(A') function: GB (_unop_tran__identity_int32_fc64)
// C type: int32_t
// A type: GxB_FC64_t
// cast: int32_t cij = GB_cast_to_int32_t (creal (aij))
// unaryop: cij = aij
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int32_t z = GB_cast_to_int32_t (creal (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int32_t z = GB_cast_to_int32_t (creal (aij)) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): identity operator with typecast GxB_FC64_t -> int32_t,
// i.e. Cx [p] = GB_cast_to_int32_t (creal (Ax [p])).
// NOTE: auto-generated code (from Generator/*) — do not hand-edit the logic.
GrB_Info GB (_unop_apply__identity_int32_fc64)
(
    int32_t *Cx,                // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,                // number of entries to process
    int nthreads                // OpenMP thread count
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/full case: every position holds an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            int32_t z = GB_cast_to_int32_t (creal (aij)) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;   // skip positions absent from the bitmap
            GxB_FC64_t aij = Ax [p] ;
            int32_t z = GB_cast_to_int32_t (creal (aij)) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast GxB_FC64_t -> int32_t, and apply
// the identity operator. The actual work is the shared template
// GB_unop_transpose.c, driven by the GB_* macros defined above.
// NOTE: auto-generated code (from Generator/*) — do not hand-edit the logic.
GrB_Info GB (_unop_tran__identity_int32_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,   // scratch space consumed by GB_unop_transpose.c — see that template
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
ast-dump-openmp-section.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test() { // AST-dump fixture: the CHECK lines below encode line:col positions, so no lines may be added or removed here
#pragma omp sections
  {
#pragma omp section
    ;
  }
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-section.c:3:1, line:9:1> line:3:6 test 'void ()'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:13, line:9:1>
// CHECK-NEXT: `-OMPSectionsDirective {{.*}} <line:4:1, col:21>
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:5:3, line:8:3>
// CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: |-CompoundStmt {{.*}} <line:5:3, line:8:3>
// CHECK-NEXT: | `-OMPSectionDirective {{.*}} <line:6:1, col:20>
// CHECK-NEXT: | `-NullStmt {{.*}} <line:7:5>
// CHECK-NEXT: `-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-section.c:4:1) *const restrict'
|
RandomForestEvaluator.h | /**
* raflevaluation: RandomForestEvaluator.h
* Copyright (c) Torr Vision Group, University of Oxford, 2015. All rights reserved.
*/
#ifndef H_RAFLEVALUATION_RANDOMFORESTEVALUATOR
#define H_RAFLEVALUATION_RANDOMFORESTEVALUATOR
#include <boost/assign/list_of.hpp>
#include <evaluation/core/LearnerEvaluator.h>
#include <evaluation/core/PerformanceMeasure.h>
#include <evaluation/core/PerformanceMeasureUtil.h>
#include <evaluation/util/ConfusionMatrixUtil.h>
#include <rafl/core/RandomForest.h>
#include <tvgutil/containers/MapUtil.h>
namespace raflevaluation {
/**
 * \brief An instance of this class can be used to evaluate a random forest using approaches based on example set splitting.
 *
 * For each split produced by the split generator, a fresh forest is trained on
 * the first half of the split and evaluated on the second half.
 */
template <typename Label>
class RandomForestEvaluator : public evaluation::LearnerEvaluator<rafl::Example<Label>,PerformanceResult>
{
  //#################### TYPEDEFS AND USINGS ####################
private:
  typedef evaluation::LearnerEvaluator<rafl::Example<Label>,PerformanceResult> Base;
  using typename Base::Example_CPtr;
  using typename Base::ResultType;
  typedef rafl::DecisionTree<Label> DecisionTree;
  typedef rafl::RandomForest<Label> RandomForest;
  typedef boost::shared_ptr<RandomForest> RandomForest_Ptr;
  //#################### PRIVATE VARIABLES ####################
private:
  /** The settings to use for the random forest. */
  std::map<std::string,std::string> m_settings;
  /** The maximum number of nodes per tree that may be split in each training step. */
  size_t m_splitBudget;
  /** The number of decision trees to use in the random forest. */
  size_t m_treeCount;
  //#################### CONSTRUCTORS ####################
public:
  /**
   * \brief Constructs a random forest evaluator.
   *
   * \param splitGenerator The generator to use to split the example set.
   * \param settings The settings to use for the random forest (must contain
   *                 "splitBudget" and "treeCount" entries).
   */
  explicit RandomForestEvaluator(const evaluation::SplitGenerator_Ptr& splitGenerator, const std::map<std::string,std::string>& settings)
  : Base(splitGenerator), m_settings(settings)
  {
    // Pull the typed member values out of the string->string settings map.
#define GET_SETTING(param) tvgutil::MapUtil::typed_lookup(settings, #param, m_##param);
    GET_SETTING(splitBudget);
    GET_SETTING(treeCount);
#undef GET_SETTING
  }
  //#################### PROTECTED MEMBER FUNCTIONS ####################
protected:
  /** Override: averages the per-split performance measures. */
  virtual PerformanceResult average_results(const std::vector<PerformanceResult>& results) const
  {
    return PerformanceMeasureUtil::average_results(results);
  }
  /** Override: trains a fresh forest on split.first and evaluates it on split.second. */
  virtual ResultType evaluate_on_split(const std::vector<Example_CPtr>& examples, const evaluation::SplitGenerator::Split& split) const
  {
    // Make a random forest using the specified settings and add the examples in the training set to it.
    RandomForest_Ptr randomForest(new RandomForest(m_treeCount, typename DecisionTree::Settings(m_settings)));
    randomForest->add_examples(examples, split.first);
    // Train the forest.
    randomForest->train(m_splitBudget);
    // Return the results of evaluating the forest on the validation set.
    return do_evaluation(randomForest, examples, split.second);
  }
  //#################### PRIVATE STATIC MEMBER FUNCTIONS ####################
private:
  /**
   * \brief Evaluates a random forest on a subset of a set of examples.
   *
   * \param randomForest The random forest.
   * \param examples The overall set of examples from which the subset of evaluation examples is drawn.
   * \param indices The indices of the subset of examples on which to evaluate the random forest.
   * \return The results of the evaluation (currently a single "Accuracy" measure).
   */
  static ResultType do_evaluation(const RandomForest_Ptr& randomForest, const std::vector<Example_CPtr>& examples, const std::vector<size_t>& indices)
  {
    std::set<Label> classLabels;
    int indicesSize = static_cast<int>(indices.size());
    std::vector<Label> expectedLabels(indicesSize), predictedLabels(indicesSize);
#ifdef WITH_OPENMP
    #pragma omp parallel for
#endif
    for(int i = 0; i < indicesSize; ++i)
    {
      const Example_CPtr& example = examples[indices[i]];
      predictedLabels[i] = randomForest->predict(example->get_descriptor());
      expectedLabels[i] = example->get_label();
      // classLabels is shared across threads, so inserts must be serialised.
#ifdef WITH_OPENMP
      #pragma omp critical
#endif
      classLabels.insert(expectedLabels[i]);
    }
    Eigen::MatrixXf confusionMatrix = ConfusionMatrixUtil::make_confusion_matrix(classLabels, expectedLabels, predictedLabels);
    return boost::assign::map_list_of("Accuracy", ConfusionMatrixUtil::calculate_accuracy(ConfusionMatrixUtil::normalise_rows_L1(confusionMatrix)));
  }
};
}
#endif
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 16;
tile_size[1] = 16;
tile_size[2] = 4;
tile_size[3] = 512;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
+ beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
For_Paralelo.c |
#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#define TRUE 1
#define FALSE 0
#else
#define omp_get_thread_num() 0
#endif
/* OpenMP worksharing demo: distributes the iterations of a simple loop
 * over a team of 4 threads and prints which thread ran each iteration. */
int main()
{
#ifdef _OPENMP
  /* Disable dynamic thread adjustment so exactly 4 threads are used. */
  (void) omp_set_dynamic(FALSE);
  if (omp_get_dynamic()) {printf("Warning: dynamic adjustment of threads has been set\n");}
  (void) omp_set_num_threads(4);
#endif
  int i, n = 9;
  /* The omp for construct splits the n iterations among the team. */
  #pragma omp parallel default(none) shared(n) private(i)
  {
    #pragma omp for
    for (i=0; i<n; i++)
      printf("El hilo %d ejecuta la iteracion %d\n",
          omp_get_thread_num(),i);
  } // End of the parallel region
  return(0);
}
|
aux_parcsr_matrix.c | /******************************************************************************
* Copyright (c) 1998 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Member functions for hypre_AuxParCSRMatrix class.
*
*****************************************************************************/
#include "_hypre_IJ_mv.h"
#include "aux_parcsr_matrix.h"
/*--------------------------------------------------------------------------
* hypre_AuxParCSRMatrixCreate
*--------------------------------------------------------------------------*/
/* Allocates an auxiliary ParCSR matrix of the given local dimensions and
 * initialises every field to its default (aux assembly enabled, no
 * off-processor stash, all array pointers NULL). `sizes` is stored as the
 * per-row space estimate (ownership semantics follow the caller's
 * convention — freed later by Destroy). Returns hypre_error_flag. */
HYPRE_Int
hypre_AuxParCSRMatrixCreate( hypre_AuxParCSRMatrix **aux_matrix,
                             HYPRE_Int local_num_rows,
                             HYPRE_Int local_num_cols,
                             HYPRE_Int *sizes )
{
   hypre_AuxParCSRMatrix *matrix;
   matrix = hypre_CTAlloc(hypre_AuxParCSRMatrix, 1, HYPRE_MEMORY_HOST);
   hypre_AuxParCSRMatrixLocalNumRows(matrix) = local_num_rows;
   /* until SetRownnz runs, every local row is counted as nonzero */
   hypre_AuxParCSRMatrixLocalNumRownnz(matrix) = local_num_rows;
   hypre_AuxParCSRMatrixLocalNumCols(matrix) = local_num_cols;
   hypre_AuxParCSRMatrixRowSpace(matrix) = sizes;
   /* set defaults */
   hypre_AuxParCSRMatrixNeedAux(matrix) = 1;
   hypre_AuxParCSRMatrixMaxOffProcElmts(matrix) = 0;
   hypre_AuxParCSRMatrixCurrentOffProcElmts(matrix) = 0;
   hypre_AuxParCSRMatrixOffProcIIndx(matrix) = 0;
   hypre_AuxParCSRMatrixRownnz(matrix) = NULL;
   hypre_AuxParCSRMatrixRowLength(matrix) = NULL;
   hypre_AuxParCSRMatrixAuxJ(matrix) = NULL;
   hypre_AuxParCSRMatrixAuxData(matrix) = NULL;
   hypre_AuxParCSRMatrixIndxDiag(matrix) = NULL;
   hypre_AuxParCSRMatrixIndxOffd(matrix) = NULL;
   hypre_AuxParCSRMatrixDiagSizes(matrix) = NULL;
   hypre_AuxParCSRMatrixOffdSizes(matrix) = NULL;
   /* stash for setting or adding on/off-proc values */
   hypre_AuxParCSRMatrixOffProcI(matrix) = NULL;
   hypre_AuxParCSRMatrixOffProcJ(matrix) = NULL;
   hypre_AuxParCSRMatrixOffProcData(matrix) = NULL;
   hypre_AuxParCSRMatrixMemoryLocation(matrix) = HYPRE_MEMORY_HOST;
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   /* device assembly stack defaults (sentinel -1 = "not set by user") */
   hypre_AuxParCSRMatrixMaxStackElmts(matrix) = 0;
   hypre_AuxParCSRMatrixCurrentStackElmts(matrix) = 0;
   hypre_AuxParCSRMatrixStackI(matrix) = NULL;
   hypre_AuxParCSRMatrixStackJ(matrix) = NULL;
   hypre_AuxParCSRMatrixStackData(matrix) = NULL;
   hypre_AuxParCSRMatrixStackSorA(matrix) = NULL;
   hypre_AuxParCSRMatrixUsrOnProcElmts(matrix) = -1;
   hypre_AuxParCSRMatrixUsrOffProcElmts(matrix) = -1;
   hypre_AuxParCSRMatrixInitAllocFactor(matrix) = 5.0;
   hypre_AuxParCSRMatrixGrowFactor(matrix) = 2.0;
#endif
   *aux_matrix = matrix;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AuxParCSRMatrixDestroy
*--------------------------------------------------------------------------*/
/* Frees an auxiliary ParCSR matrix and everything it owns. A NULL matrix
 * is a no-op. When a rownnz list exists, only the rows it names own aux
 * row arrays; otherwise every local row does. Returns hypre_error_flag.
 * Cleanup vs. original: the AuxData array-of-pointers is now freed once
 * after the branch, matching the AuxJ branch, instead of being freed
 * identically in both arms. */
HYPRE_Int
hypre_AuxParCSRMatrixDestroy( hypre_AuxParCSRMatrix *matrix )
{
   HYPRE_Int num_rownnz;
   HYPRE_Int num_rows;
   HYPRE_Int *rownnz;
   HYPRE_Int i;

   if (matrix)
   {
      rownnz = hypre_AuxParCSRMatrixRownnz(matrix);
      num_rownnz = hypre_AuxParCSRMatrixLocalNumRownnz(matrix);
      num_rows = hypre_AuxParCSRMatrixLocalNumRows(matrix);

      /* Per-row auxiliary column-index arrays. */
      if (hypre_AuxParCSRMatrixAuxJ(matrix))
      {
         if (rownnz)
         {
            for (i = 0; i < num_rownnz; i++)
            {
               hypre_TFree(hypre_AuxParCSRMatrixAuxJ(matrix)[rownnz[i]], HYPRE_MEMORY_HOST);
            }
         }
         else
         {
            for (i = 0; i < num_rows; i++)
            {
               hypre_TFree(hypre_AuxParCSRMatrixAuxJ(matrix)[i], HYPRE_MEMORY_HOST);
            }
         }
         hypre_TFree(hypre_AuxParCSRMatrixAuxJ(matrix), HYPRE_MEMORY_HOST);
      }

      /* Per-row auxiliary value arrays (same ownership pattern as AuxJ). */
      if (hypre_AuxParCSRMatrixAuxData(matrix))
      {
         if (rownnz)
         {
            for (i = 0; i < num_rownnz; i++)
            {
               hypre_TFree(hypre_AuxParCSRMatrixAuxData(matrix)[rownnz[i]], HYPRE_MEMORY_HOST);
            }
         }
         else
         {
            for (i = 0; i < num_rows; i++)
            {
               hypre_TFree(hypre_AuxParCSRMatrixAuxData(matrix)[i], HYPRE_MEMORY_HOST);
            }
         }
         hypre_TFree(hypre_AuxParCSRMatrixAuxData(matrix), HYPRE_MEMORY_HOST);
      }

      /* Flat per-row bookkeeping arrays. */
      hypre_TFree(hypre_AuxParCSRMatrixRownnz(matrix), HYPRE_MEMORY_HOST);
      hypre_TFree(hypre_AuxParCSRMatrixRowLength(matrix), HYPRE_MEMORY_HOST);
      hypre_TFree(hypre_AuxParCSRMatrixRowSpace(matrix), HYPRE_MEMORY_HOST);
      hypre_TFree(hypre_AuxParCSRMatrixIndxDiag(matrix), HYPRE_MEMORY_HOST);
      hypre_TFree(hypre_AuxParCSRMatrixIndxOffd(matrix), HYPRE_MEMORY_HOST);
      hypre_TFree(hypre_AuxParCSRMatrixDiagSizes(matrix), HYPRE_MEMORY_HOST);
      hypre_TFree(hypre_AuxParCSRMatrixOffdSizes(matrix), HYPRE_MEMORY_HOST);

      /* Off-processor stash. */
      hypre_TFree(hypre_AuxParCSRMatrixOffProcI(matrix), HYPRE_MEMORY_HOST);
      hypre_TFree(hypre_AuxParCSRMatrixOffProcJ(matrix), HYPRE_MEMORY_HOST);
      hypre_TFree(hypre_AuxParCSRMatrixOffProcData(matrix), HYPRE_MEMORY_HOST);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
      /* Device assembly stack lives in the matrix's own memory location. */
      hypre_TFree(hypre_AuxParCSRMatrixStackI(matrix), hypre_AuxParCSRMatrixMemoryLocation(matrix));
      hypre_TFree(hypre_AuxParCSRMatrixStackJ(matrix), hypre_AuxParCSRMatrixMemoryLocation(matrix));
      hypre_TFree(hypre_AuxParCSRMatrixStackData(matrix), hypre_AuxParCSRMatrixMemoryLocation(matrix));
      hypre_TFree(hypre_AuxParCSRMatrixStackSorA(matrix), hypre_AuxParCSRMatrixMemoryLocation(matrix));
#endif

      hypre_TFree(matrix, HYPRE_MEMORY_HOST);
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AuxParCSRMatrixSetRownnz
*--------------------------------------------------------------------------*/
/* Rebuilds the rownnz list (indices of rows with row_space > 0) and frees
 * the aux arrays of rows that dropped out of the old list. When every row
 * is nonzero, no list is stored (Rownnz == NULL). Returns hypre_error_flag. */
HYPRE_Int
hypre_AuxParCSRMatrixSetRownnz( hypre_AuxParCSRMatrix *matrix )
{
   HYPRE_Int local_num_rows = hypre_AuxParCSRMatrixLocalNumRows(matrix);
   HYPRE_Int *row_space = hypre_AuxParCSRMatrixRowSpace(matrix);
   HYPRE_Int num_rownnz_old = hypre_AuxParCSRMatrixLocalNumRownnz(matrix);
   HYPRE_Int *rownnz_old = hypre_AuxParCSRMatrixRownnz(matrix);
   HYPRE_Int *rownnz;
   HYPRE_Int i, ii, local_num_rownnz;
   /* Count number of nonzero rows */
   local_num_rownnz = 0;
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) reduction(+:local_num_rownnz) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < local_num_rows; i++)
   {
      if (row_space[i] > 0)
      {
         local_num_rownnz++;
      }
   }
   if (local_num_rownnz != local_num_rows)
   {
      rownnz = hypre_CTAlloc(HYPRE_Int, local_num_rownnz, HYPRE_MEMORY_HOST);
      /* Find nonzero rows (second, serial pass fills the index list) */
      local_num_rownnz = 0;
      for (i = 0; i < local_num_rows; i++)
      {
         if (row_space[i] > 0)
         {
            rownnz[local_num_rownnz++] = i;
         }
      }
      /* Free memory if necessary: walk old and new (both ascending) lists in
       * lockstep; rows present only in the old list lost their entries, so
       * their aux arrays are freed. NOTE(review): this assumes the new list
       * is a subset of the old one — TODO confirm with callers. */
      if (rownnz_old && rownnz && (local_num_rownnz < num_rownnz_old))
      {
         ii = 0;
         for (i = 0; i < num_rownnz_old; i++)
         {
            if (rownnz_old[i] == rownnz[ii])
            {
               ii++; /* row survives: keep its aux arrays */
            }
            else
            {
               hypre_TFree(hypre_AuxParCSRMatrixAuxJ(matrix)[rownnz_old[i]], HYPRE_MEMORY_HOST);
               hypre_TFree(hypre_AuxParCSRMatrixAuxData(matrix)[rownnz_old[i]], HYPRE_MEMORY_HOST);
            }
            if (ii == local_num_rownnz)
            {
               /* new list exhausted: every remaining old row is gone */
               i = i + 1;
               for (; i < num_rownnz_old; i++)
               {
                  hypre_TFree(hypre_AuxParCSRMatrixAuxJ(matrix)[rownnz_old[i]],
                              HYPRE_MEMORY_HOST);
                  hypre_TFree(hypre_AuxParCSRMatrixAuxData(matrix)[rownnz_old[i]],
                              HYPRE_MEMORY_HOST);
               }
               break;
            }
         }
      }
      hypre_TFree(rownnz_old, HYPRE_MEMORY_HOST);
      hypre_AuxParCSRMatrixLocalNumRownnz(matrix) = local_num_rownnz;
      hypre_AuxParCSRMatrixRownnz(matrix) = rownnz;
   }
   else
   {
      /* All rows nonzero: drop the explicit list entirely */
      hypre_TFree(rownnz_old, HYPRE_MEMORY_HOST);
      hypre_AuxParCSRMatrixLocalNumRownnz(matrix) = local_num_rows;
      hypre_AuxParCSRMatrixRownnz(matrix) = NULL;
   }
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AuxParCSRMatrixInitialize_v2
*--------------------------------------------------------------------------*/
/* Allocate the internal assembly workspace of an auxiliary IJ matrix for a
 * given memory location.
 *
 * matrix          - auxiliary matrix to initialize
 * memory_location - where assembled entries will live; a non-host location
 *                   (with CUDA/HIP builds) selects the GPU assembly path
 *
 * Returns -1 if the local row count is negative, 0 if there are no rows,
 * hypre_error_flag otherwise. */
HYPRE_Int
hypre_AuxParCSRMatrixInitialize_v2( hypre_AuxParCSRMatrix *matrix,
                                    HYPRE_MemoryLocation memory_location )
{
   HYPRE_Int local_num_rows     = hypre_AuxParCSRMatrixLocalNumRows(matrix);
   HYPRE_Int max_off_proc_elmts = hypre_AuxParCSRMatrixMaxOffProcElmts(matrix);

   hypre_AuxParCSRMatrixMemoryLocation(matrix) = memory_location;

   /* negative row count is an error; zero rows means nothing to allocate */
   if (local_num_rows < 0)
   {
      return -1;
   }

   if (local_num_rows == 0)
   {
      return 0;
   }

   /* WM: Q - added the macro guards here (since IJ assembly not yet ported to sycl)... is this OK/correct? */
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   if (memory_location != HYPRE_MEMORY_HOST)
   {
      /* GPU assembly: only flag that aux structures are needed here; the
       * device-side stack arrays are allocated elsewhere */
      hypre_AuxParCSRMatrixNeedAux(matrix) = 1;
   }
   else
#endif
   {
      /* CPU assembly */
      /* allocate stash for setting or adding off processor values.
       * NOTE(review): OffProcI is sized 2x per element (unlike OffProcJ and
       * OffProcData) — confirm the intended packing with the assembly code. */
      if (max_off_proc_elmts > 0)
      {
         hypre_AuxParCSRMatrixOffProcI(matrix) = hypre_CTAlloc(HYPRE_BigInt, 2 * max_off_proc_elmts,
                                                               HYPRE_MEMORY_HOST);
         hypre_AuxParCSRMatrixOffProcJ(matrix) = hypre_CTAlloc(HYPRE_BigInt, max_off_proc_elmts,
                                                               HYPRE_MEMORY_HOST);
         hypre_AuxParCSRMatrixOffProcData(matrix) = hypre_CTAlloc(HYPRE_Complex, max_off_proc_elmts,
                                                                  HYPRE_MEMORY_HOST);
      }

      if (hypre_AuxParCSRMatrixNeedAux(matrix))
      {
         /* Per-row auxiliary storage: one column-index buffer and one value
          * buffer per (nonzero) row */
         HYPRE_Int      *row_space = hypre_AuxParCSRMatrixRowSpace(matrix);
         HYPRE_Int      *rownnz    = hypre_AuxParCSRMatrixRownnz(matrix);
         HYPRE_BigInt  **aux_j     = hypre_CTAlloc(HYPRE_BigInt *, local_num_rows, HYPRE_MEMORY_HOST);
         HYPRE_Complex **aux_data  = hypre_CTAlloc(HYPRE_Complex *, local_num_rows, HYPRE_MEMORY_HOST);
         HYPRE_Int       local_num_rownnz;
         HYPRE_Int       i, ii;

         if (row_space)
         {
            /* Count number of nonzero rows */
            local_num_rownnz = 0;
            for (i = 0; i < local_num_rows; i++)
            {
               if (row_space[i] > 0)
               {
                  local_num_rownnz++;
               }
            }

            /* Build a compressed row-index list only when some rows are empty */
            if (local_num_rownnz != local_num_rows)
            {
               rownnz = hypre_CTAlloc(HYPRE_Int, local_num_rownnz, HYPRE_MEMORY_HOST);

               /* Find nonzero rows */
               local_num_rownnz = 0;
               for (i = 0; i < local_num_rows; i++)
               {
                  if (row_space[i] > 0)
                  {
                     rownnz[local_num_rownnz++] = i;
                  }
               }

               hypre_AuxParCSRMatrixLocalNumRownnz(matrix) = local_num_rownnz;
               hypre_AuxParCSRMatrixRownnz(matrix) = rownnz;
            }
         }

         /* RowLength tracks the number of entries currently stored per row */
         if (!hypre_AuxParCSRMatrixRowLength(matrix))
         {
            hypre_AuxParCSRMatrixRowLength(matrix) = hypre_CTAlloc(HYPRE_Int, local_num_rows,
                                                                   HYPRE_MEMORY_HOST);
         }

         if (row_space)
         {
            /* Allocate each row's buffers with its requested capacity,
             * skipping empty rows when a compressed list exists */
            if (local_num_rownnz != local_num_rows)
            {
               for (i = 0; i < local_num_rownnz; i++)
               {
                  ii = rownnz[i];
                  aux_j[ii] = hypre_CTAlloc(HYPRE_BigInt, row_space[ii], HYPRE_MEMORY_HOST);
                  aux_data[ii] = hypre_CTAlloc(HYPRE_Complex, row_space[ii], HYPRE_MEMORY_HOST);
               }
            }
            else
            {
               for (i = 0; i < local_num_rows; i++)
               {
                  aux_j[i] = hypre_CTAlloc(HYPRE_BigInt, row_space[i], HYPRE_MEMORY_HOST);
                  aux_data[i] = hypre_CTAlloc(HYPRE_Complex, row_space[i], HYPRE_MEMORY_HOST);
               }
            }
         }
         else
         {
            /* No capacities given: default every row to 30 entries */
            row_space = hypre_CTAlloc(HYPRE_Int, local_num_rows, HYPRE_MEMORY_HOST);
            for (i = 0; i < local_num_rows; i++)
            {
               row_space[i] = 30;
               aux_j[i] = hypre_CTAlloc(HYPRE_BigInt, 30, HYPRE_MEMORY_HOST);
               aux_data[i] = hypre_CTAlloc(HYPRE_Complex, 30, HYPRE_MEMORY_HOST);
            }
            hypre_AuxParCSRMatrixRowSpace(matrix) = row_space;
         }
         hypre_AuxParCSRMatrixAuxJ(matrix) = aux_j;
         hypre_AuxParCSRMatrixAuxData(matrix) = aux_data;
      }
      else
      {
         /* Direct (no-aux) assembly: track per-row insert positions in the
          * diagonal and off-diagonal parts instead */
         hypre_AuxParCSRMatrixIndxDiag(matrix) = hypre_CTAlloc(HYPRE_Int, local_num_rows, HYPRE_MEMORY_HOST);
         hypre_AuxParCSRMatrixIndxOffd(matrix) = hypre_CTAlloc(HYPRE_Int, local_num_rows, HYPRE_MEMORY_HOST);
      }
   }

   return hypre_error_flag;
}
/* Convenience wrapper: initialize the auxiliary matrix using the memory
 * location already stored in it.  Returns -2 for a NULL matrix, otherwise
 * the result of hypre_AuxParCSRMatrixInitialize_v2. */
HYPRE_Int
hypre_AuxParCSRMatrixInitialize(hypre_AuxParCSRMatrix *matrix)
{
   if (!matrix)
   {
      return -2;
   }

   return hypre_AuxParCSRMatrixInitialize_v2(matrix, hypre_AuxParCSRMatrixMemoryLocation(matrix));
}
|
layerramsubset.h | /*********************************************************************************
*
* Inviwo - Interactive Visualization Workshop
*
* Copyright (c) 2018-2019 Inviwo Foundation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*********************************************************************************/
#ifndef IVW_LAYERRAMSUBSET_H
#define IVW_LAYERRAMSUBSET_H
#include <modules/base/basemoduledefine.h>
#include <inviwo/core/common/inviwo.h>
#include <inviwo/core/datastructures/image/layer.h>
#include <inviwo/core/datastructures/image/layerram.h>
#include <inviwo/core/datastructures/image/layerramprecision.h>
#include <inviwo/core/util/glm.h>
#include <algorithm>
namespace inviwo {
namespace util {
/**
* \brief extracts a subregion from a layer and returns it as a new layer
*
* This function extracts a subregion given by offset and extent from the input layer.
* If border clamping is enabled, the output region will be clamped to lie completely within the
* source layer. Otherwise (default), the areas outside the source layer will be filled with
* zeros.
*
* @param in input layer
* @param offset subregion offset in input layer
* @param extent extent (width and height) of subregion
* @param clampBorderOutsideImage if true, the output region is clamped to the layer boundaries
* @return std::shared_ptr<LayerRAM>
*/
IVW_MODULE_BASE_API std::shared_ptr<LayerRAM> layerSubSet(const Layer* in, ivec2 offset,
size2_t extent,
bool clampBorderOutsideImage = false);
/**
* \brief extracts a subregion from a layer and converts it into a new layer
*
* This function extracts a subregion given by offset and extent from the input layer. The values
* will be converted to type T using util::glm_convert_normalized.
* If border clamping is enabled, the output region will be clamped to lie completely within the
* source layer. Otherwise (default), the areas outside the source layer will be filled with
* zeros.
*
* @param in input layer
* @param offset subregion offset in input layer
* @param extent extent (width and height) of subregion
* @param clampBorderOutsideImage if true, the output region is clamped to the layer boundaries
* @return std::shared_ptr<LayerRAMPrecision<T>>
*/
template <typename T>
std::shared_ptr<LayerRAMPrecision<T>> layerSubSet(const Layer* in, ivec2 offset, size2_t extent,
bool clampBorderOutsideImage = false);
namespace detail {
template <typename T>
void conversionCopy(const T* src, T* dst, size_t len) {
    // Same-type overload: plain element-wise copy, no value conversion needed.
    for (size_t i = 0; i < len; ++i) {
        dst[i] = src[i];
    }
}
template <typename To, typename From>
void conversionCopy(const From* src, To* dst, size_t len) {
    // Mixed-type overload: each element is converted through the normalized
    // GLM conversion before being written to the destination.
    const From* const last = src + len;
    while (src != last) {
        *dst++ = util::glm_convert_normalized<To, From>(*src++);
    }
}
/**
 * \brief copies (and, when U != T, converts) the subregion given by offset and
 * extent from a LayerRAMPrecision<T> into a newly allocated
 * LayerRAMPrecision<U>.
 *
 * If \p clampBorderOutsideImage is true the destination is shrunk to the
 * intersection of the requested region and the source layer; otherwise the
 * destination keeps the requested extent and areas outside the source are
 * zero-filled.
 */
template <typename T, typename U = T>
std::shared_ptr<LayerRAMPrecision<U>> extractLayerSubSet(const LayerRAMPrecision<T>* inLayer,
                                                         ivec2 offset, size2_t extent,
                                                         bool clampBorderOutsideImage) {
    // determine parameters
    const ivec2 srcDim(inLayer->getDimensions());

    // adjust the output dimensions to match the intersection of output and input regions
    const ivec2 srcOffset(glm::max(ivec2(0), offset));  // first source pixel actually read
    // where the copied region lands in the destination (always 0 when clamping)
    const ivec2 dstOffset = clampBorderOutsideImage ? ivec2(0) : (glm::max(ivec2(0), -offset));
    // clamp copy extent to source layer
    const ivec2 copyExtent = glm::min(ivec2(extent) - dstOffset, srcDim - srcOffset);
    const ivec2 dstDim = clampBorderOutsideImage ? copyExtent : ivec2(extent);

    // allocate space
    auto newLayer = std::make_shared<LayerRAMPrecision<U>>(dstDim);
    const auto src = inLayer->getDataTyped();
    auto dst = newLayer->getDataTyped();
    if (!clampBorderOutsideImage) {
        // clear entire layer as only parts will be copied
        std::fill(dst, dst + dstDim.x * dstDim.y, U(0));
    }
    // memcpy each row to form sub layer; rows are independent, so the copy is
    // parallelized over the row index j
#pragma omp parallel for
    for (int j = 0; j < copyExtent.y; j++) {
        // linear offsets of the row start in the source and destination buffers
        size_t srcPos = (j + srcOffset.y) * srcDim.x + srcOffset.x;
        size_t dstPos = (j + dstOffset.y) * dstDim.x + dstOffset.x;
        conversionCopy(src + srcPos, dst + dstPos, static_cast<size_t>(copyExtent.x));
    }

    return newLayer;
}
} // namespace detail
} // namespace util
/**
 * Implementation of the templated layerSubSet declared above: dispatches on
 * the concrete per-pixel type of the layer's RAM representation and forwards
 * to detail::extractLayerSubSet, which performs the copy and the conversion
 * to the requested type T.
 */
template <typename T>
std::shared_ptr<LayerRAMPrecision<T>> util::layerSubSet(const Layer* in, ivec2 offset,
                                                        size2_t extent,
                                                        bool clampBorderOutsideImage) {
    return in->getRepresentation<LayerRAM>()->dispatch<std::shared_ptr<LayerRAMPrecision<T>>>(
        [offset, extent, clampBorderOutsideImage](auto layerpr) {
            // ValueType is the source layer's stored type; T is the destination type
            using ValueType = util::PrecisionValueType<decltype(layerpr)>;
            return util::detail::extractLayerSubSet<ValueType, T>(layerpr, offset, extent,
                                                                  clampBorderOutsideImage);
        });
}
} // namespace inviwo
#endif // IVW_LAYERRAMSUBSET_H
|
3d25pt_var.c | /*
 * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for struct timeval operands.
 *
 * Note: y is normalized in place as a side effect (same as the classic
 * GNU libc example this is based on).
 *
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow one or more seconds into y when x has fewer microseconds. */
    if (x->tv_usec < y->tv_usec)
    {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;

        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }

    /* Carry excess microseconds of the gap into whole seconds of y. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;

        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }

    /* After normalization the microsecond difference is non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    return x->tv_sec < y->tv_sec;
}
/* Benchmark driver for the order-4 (25-point) 3D stencil with
 * axis-symmetric variable coefficients.
 *
 * Usage: <exe> Nx Ny Nz [Nt]
 * Each spatial dimension is padded by 8 points (a 4-deep halo on each side);
 * Nt is the number of time steps.  The kernel runs TESTS times and the
 * minimum wall time is reported via PRINT_RESULTS. */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  /* default to 0 so missing arguments give empty (not undefined) loops */
  int Nx = 0, Ny = 0, Nz = 0, Nt = 0;

  if (argc > 3) {
    Nx = atoi(argv[1]) + 8;
    Ny = atoi(argv[2]) + 8;
    Nz = atoi(argv[3]) + 8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* allocate the two time planes of the grid */
  double ****A = (double ****) malloc(sizeof(double***) * 2);
  for (m = 0; m < 2; m++) {
    A[m] = (double ***) malloc(sizeof(double**) * Nz);
    for (i = 0; i < Nz; i++) {
      A[m][i] = (double**) malloc(sizeof(double*) * Ny);
      for (j = 0; j < Ny; j++) {
        A[m][i][j] = (double*) malloc(sizeof(double) * Nx);
      }
    }
  }

  /* 13 coefficient arrays: 1 center + 4 symmetric pairs per axis */
  double ****coef = (double ****) malloc(sizeof(double***) * 13);
  for (m = 0; m < 13; m++) {
    coef[m] = (double ***) malloc(sizeof(double**) * Nz);
    for (i = 0; i < Nz; i++) {
      coef[m][i] = (double**) malloc(sizeof(double*) * Ny);
      for (j = 0; j < Ny; j++) {
        coef[m][i][j] = (double*) malloc(sizeof(double) * Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int) * 5);
  tile_size[0] = 24;
  tile_size[1] = 24;
  tile_size[2] = 32;
  tile_size[3] = 32;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;

  const int BASE = 1024;

  /* Initialize with reproducible pseudo-random data.  BUGFIX: loops start
   * at 0 so the halo cells read by the stencil (down to index 0) are
   * defined; the original started at 1, leaving index 0 uninitialized. */
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m = 0; m < 13; m++) {
    for (i = 0; i < Nz; i++) {
      for (j = 0; j < Ny; j++) {
        for (k = 0; k < Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for (test = 0; test < TESTS; test++) {
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt; t++) {
      for (i = 4; i < Nz - 4; i++) {
        for (j = 4; j < Ny - 4; j++) {
          for (k = 4; k < Nx - 4; k++) {
            /* double-buffered update: read plane t%2, write plane (t+1)%2 */
            A[(t+1)%2][i][j][k] =
              coef[0][i][j][k] * A[(t)%2][i ][j ][k ] +
              coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) +
              coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) +
              coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) +
              coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) +
              coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) +
              coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) +
              coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) +
              coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) +
              coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) +
              coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) +
              coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) +
              coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ;
          }
        }
      }
    }
#pragma endscop

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    /* BUGFIX: the original called an undefined lowercase min(); the macro
     * defined in this file is MIN. */
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(4, "variable axis-symmetric")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);

  for (m = 0; m < 13; m++) {
    for (i = 0; i < Nz; i++) {
      for (j = 0; j < Ny; j++) {
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);

  /* silence unused-variable warnings (kept for tooling/macros) */
  (void) ts_return;
  (void) num_threads;

  return 0;
}
|
join.c | /* Copyright 2013-2015. The Regents of the University of California.
* Copyright 2015. Martin Uecker.
* All rights reserved. Use of this source code is governed by
* a BSD-style license which can be found in the LICENSE file.
*
* Authors:
* 2013, 2015 Martin Uecker <martin.uecker@med.uni-goettingen.de>
* 2015 Jonathan Tamir <jtamir@eecs.berkeley.edu>
*/
#include <stdbool.h>
#include <complex.h>
#include <string.h>
#include <unistd.h>
#include "num/multind.h"
#include "num/init.h"
#include "misc/mmio.h"
#include "misc/debug.h"
#include "misc/misc.h"
#include "misc/opts.h"
#ifndef DIMS
#define DIMS 16
#endif
#ifndef CFL_SIZE
#define CFL_SIZE sizeof(complex float)
#endif
static const char usage_str[] = "dimension <input1> ... <inputn> <output>";
static const char help_str[] =
"Join input files along {dimensions}. All other dimensions must have the same size.\n"
"\t Example 1: join 0 slice_001 slice_002 slice_003 full_data\n"
"\t Example 2: join 0 `seq -f \"slice_%%03g\" 0 255` full_data\n";
/* Join the input cfl files along the given dimension.
 *
 * All dimensions other than the joined one must agree across inputs.  With
 * -a (append), the existing output file is treated as the first input; if
 * the output does not yet exist as a .cfl file, append mode silently falls
 * back to a plain join (after asserting no .coo/.ra file of that name
 * exists).  Returns 0 on success. */
int main_join(int argc, char* argv[])
{
	bool append = false;

	const struct opt_s opts[] = {

		OPT_SET('a', &append, "append - only works for cfl files!"),
	};

	cmdline(&argc, argv, 3, 1000, usage_str, help_str, ARRAY_SIZE(opts), opts);

	num_init();

	int N = DIMS;
	int dim = atoi(argv[1]);
	assert(dim < N);

	int count = argc - 3;

	if (append) {

		count += 1;
		assert(count > 1);

		/* Append requires the output to already exist as a .cfl file. */
		int len = strlen(argv[argc - 1]);
		char buf[len + 5];
		strcpy(buf, argv[argc - 1]);
		strcat(buf, ".cfl");

		if (-1 == access(buf, F_OK)) {

			// make sure we do not have any other file format
			strcpy(buf, argv[argc - 1]);
			strcat(buf, ".coo");
			assert(-1 == access(buf, F_OK));

			strcpy(buf, argv[argc - 1]);
			strcat(buf, ".ra");
			assert(-1 == access(buf, F_OK));

			/* output absent: fall back to a normal join */
			count--;
			append = false;
		}
	}

	long in_dims[count][N];
	long offsets[count];
	complex float* idata[count];

	long sum = 0;

	// figure out size of output
	for (int l = 0, i = 0; i < count; i++) {

		const char* name = NULL;

		if (append && (i == 0)) {

			/* in append mode the existing output is the first input */
			name = argv[argc - 1];

		} else {

			name = argv[2 + l++];
		}

		debug_printf(DP_DEBUG1, "loading %s\n", name);

		idata[i] = load_cfl(name, N, in_dims[i]);
		offsets[i] = sum;		/* start position of this input along 'dim' */
		sum += in_dims[i][dim];

		/* all non-joined dimensions must match the first input */
		for (int j = 0; j < N; j++)
			assert((dim == j) || (in_dims[0][j] == in_dims[i][j]));
	}

	long out_dims[N];

	for (int i = 0; i < N; i++)
		out_dims[i] = in_dims[0][i];

	out_dims[dim] = sum;

	complex float* out_data = create_cfl(argv[argc - 1], N, out_dims);

	/* NOTE: the original also computed output strides (ostr) and per-input
	 * strides (istr) via md_calc_strides, but never used them; the dead
	 * computations were removed. */
	#pragma omp parallel for
	for (int i = 0; i < count; i++) {

		if (!(append && (0 == i))) {

			long pos[N];
			/* NOTE(review): pos is initialized via md_singleton_strides
			 * before the joined-dimension offset is set — confirm it
			 * fills the vector as intended for use as a position. */
			md_singleton_strides(N, pos);
			pos[dim] = offsets[i];

			md_copy_block(N, pos, out_dims, out_data, in_dims[i], idata[i], CFL_SIZE);
		}

		unmap_cfl(N, in_dims[i], idata[i]);

		debug_printf(DP_DEBUG1, "done copying file %d\n", i);
	}

	unmap_cfl(N, out_dims, out_data);

	return 0;
}
|
mmult.c | #include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <stdlib.h>
#define NRA 2048 /* number of rows in matrix A */
#define NCA 2048 /* number of columns in matrix A */
#define NCB 2048 /* number of columns in matrix B */
struct timeval startTime;
struct timeval finishTime;
double timeIntervalLength;
__sw_global__ double **a; /* [NRA][NCA] */
__sw_global__ double **b; /* [NCA][NCB] */
__sw_global__ double **c; /* [NRA][NCB] */
__sw_global__ double sum;
/* Allocate 'size' bytes or terminate the program.
 *
 * size - number of bytes to allocate
 * info - caller-supplied tag printed in the error message to identify the
 *        failing allocation site
 *
 * Returns the allocated pointer; never returns NULL.  BUGFIX: exits with
 * EXIT_FAILURE instead of the original exit(0), which reported success to
 * the shell on an allocation failure. */
void* myMalloc(int size, int info)
{
	void* t = (void*)malloc(size);
	if(!t)
	{
		printf("\nMemory allocation error [%d]",info);
		fflush(stdout);
		exit(EXIT_FAILURE);
	}
	return t;
}
/* Dense matrix-multiply benchmark: c = a * b with a: NRA x NCA,
 * b: NCA x NCB, c: NRA x NCB.  Times the multiply with gettimeofday and
 * prints the elapsed time (ms) and a checksum of c. */
int main (int argc, char *argv[])
{
	__sw_global__ long i, j, k;

	sum = 0;

	/* Allocate the matrices.  BUGFIX: the original row loops used the
	 * wrong constants (NCA rows for a, NCB rows for b and c); this only
	 * worked because NRA == NCA == NCB.  Correct row counts are used here. */
	a = (double**)myMalloc(NRA*sizeof(double*),1);
	for (i=0;i<NRA;i++)
		a[i]=(double*)myMalloc(NCA*sizeof(double),2);
	b = (double**)myMalloc(NCA*sizeof(double*),3);
	for (i=0;i<NCA;i++)
		b[i]=(double*)myMalloc(NCB*sizeof(double),4);
	c = (double**)myMalloc(NRA*sizeof(double*),5);
	for (i=0;i<NRA;i++)
		c[i]=(double*)myMalloc(NCB*sizeof(double),6);

	/*** Initialize matrices ***/
	for(i = 0; i < NRA; i++)
		for(j = 0; j < NCA; j++)
			a[i][j] = i + j;
	for(i = 0; i < NCA; i++)
		for(j = 0; j < NCB; j++)
			b[i][j] = i * j;
	for(i = 0; i < NRA; i++)
		for(j = 0; j < NCB; j++)
			c[i][j] = 0;

	// Start timers
	gettimeofday(&startTime, NULL);

#pragma omp parallel private (i, j, k)
	{
		/* BUGFIX: a worksharing '#pragma omp for' must be followed
		 * immediately by the for-loop; the original wrapped the loop in an
		 * extra brace block, which is not a valid OpenMP construct. */
#pragma omp for schedule (static, 8)
		for(i = 0; i < NRA; i++)
			for(j = 0; j < NCB; j++)
				for(k = 0; k < NCA; k++)
					c[i][j] += a[i][k] * b[k][j];
	}

	// End timers
	gettimeofday(&finishTime, NULL);

	// Calculate the interval length (microseconds, then converted to ms)
	timeIntervalLength = (double)(finishTime.tv_sec-startTime.tv_sec) * 1000000
		+ (double)(finishTime.tv_usec-startTime.tv_usec);
	timeIntervalLength = timeIntervalLength/1000;

	// Print the interval length
	printf("__aid_Time: %g msec.\n", timeIntervalLength);

	/*** Print results ***/
	for(i = 0; i < NRA; i++)
		for(j = 0; j < NCB; j++)
			sum += c[i][j];
	printf("__aid_Result: %g\n\n", sum);

	return 0;
}
|
filter.h | #ifndef OPENMC_TALLIES_FILTER_H
#define OPENMC_TALLIES_FILTER_H
#include <cstdint>
#include <memory>
#include <string>
#include <vector>
#include "openmc/hdf5_interface.h"
#include "openmc/particle.h"
#include "pugixml.hpp"
namespace openmc {
//==============================================================================
//! Stores bins and weights for filtered tally events.
//==============================================================================
class FilterMatch
{
public:
  //! Indices of the filter bins matched by a tally event.
  std::vector<int> bins_{};
  //! Weight associated with each matched bin (parallel to bins_).
  std::vector<double> weights_{};
};
} // namespace openmc
// Without an explicit instantiation of vector<FilterMatch>, the Intel compiler
// will complain about the threadprivate directive on filter_matches. Note that
// this has to happen *outside* of the openmc namespace
extern template class std::vector<openmc::FilterMatch>;
namespace openmc {
//==============================================================================
//! Modifies tally score events.
//==============================================================================
class Filter
{
public:
  //! Virtual destructor so derived filters are destroyed correctly when
  //! deleted through a Filter* (e.g. the unique_ptrs in model::tally_filters).
  virtual ~Filter() = default;

  //! Short string identifying the concrete filter type; written to the
  //! statepoint file by to_statepoint().
  virtual std::string type() const = 0;

  //! Uses an XML input to fill the filter's data fields.
  virtual void from_xml(pugi::xml_node node) = 0;

  //! Matches a tally event to a set of filter bins and weights.
  //!
  //! \param[out] match will contain the matching bins and corresponding
  //!   weights; note that there may be zero matching bins
  virtual void
  get_all_bins(const Particle* p, int estimator, FilterMatch& match) const = 0;

  //! Writes data describing this filter to an HDF5 statepoint group.
  //! The default writes the type string and the bin count; derived classes
  //! may extend it.
  virtual void
  to_statepoint(hid_t filter_group) const
  {
    write_dataset(filter_group, "type", type());
    write_dataset(filter_group, "n_bins", n_bins_);
  }

  //! Return a string describing a filter bin for the tallies.out file.
  //
  //! For example, an `EnergyFilter` might return the string
  //! "Incoming Energy [0.625E-6, 20.0)".
  virtual std::string text_label(int bin) const = 0;

  //! Optional post-construction hook; the default does nothing.
  virtual void initialize() {}

  //! Number of filter bins.  NOTE(review): not initialized here — derived
  //! classes are presumably expected to set it (e.g. in from_xml); confirm
  //! before relying on its value.
  int n_bins_;
};
//==============================================================================
// Global variables
//==============================================================================
namespace simulation {
extern std::vector<FilterMatch> filter_matches;
#pragma omp threadprivate(filter_matches)
} // namespace simulation
namespace model {
extern "C" int32_t n_filters;
extern std::vector<std::unique_ptr<Filter>> tally_filters;
} // namespace model
//==============================================================================
extern "C" void free_memory_tally_c();
//==============================================================================
// Filter-related Fortran functions that will be called from C++
extern "C" int verify_filter(int32_t index);
extern "C" Filter* filter_from_f(int32_t index);
extern "C" void filter_update_n_bins(int32_t index);
} // namespace openmc
#endif // OPENMC_TALLIES_FILTER_H
|
generator_spgemm_csc_asparse.c | /******************************************************************************
** Copyright (c) 2015-2019, Intel Corporation **
** All rights reserved. **
** **
** Redistribution and use in source and binary forms, with or without **
** modification, are permitted provided that the following conditions **
** are met: **
** 1. Redistributions of source code must retain the above copyright **
** notice, this list of conditions and the following disclaimer. **
** 2. Redistributions in binary form must reproduce the above copyright **
** notice, this list of conditions and the following disclaimer in the **
** documentation and/or other materials provided with the distribution. **
** 3. Neither the name of the copyright holder nor the names of its **
** contributors may be used to endorse or promote products derived **
** from this software without specific prior written permission. **
** **
** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS **
** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT **
** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR **
** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT **
** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, **
** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED **
** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR **
** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF **
** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING **
** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS **
** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. **
******************************************************************************/
/* Alexander Heinecke (Intel Corp.)
******************************************************************************/
/**
* @file
* This file is part of GemmCodeGenerator.
*
* @author Alexander Heinecke (alexander.heinecke AT mytum.de, http://www5.in.tum.de/wiki/index.php/Alexander_Heinecke,_M.Sc.,_M.Sc._with_honors)
*
* @section LICENSE
* Copyright (c) 2012-2014, Technische Universitaet Muenchen
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* @section DESCRIPTION
* <DESCRIPTION>
*/
#include "generator_spgemm_csc_asparse.h"
#include "generator_common.h"
#include "libxsmm_main.h"
#if defined(LIBXSMM_OFFLOAD_TARGET)
# pragma offload_attribute(push,target(LIBXSMM_OFFLOAD_TARGET))
#endif
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <stdio.h>
#if defined(LIBXSMM_OFFLOAD_TARGET)
# pragma offload_attribute(pop)
#endif
/* Emits C source text (appended to io_generated_code) that multiplies one
 * nonzero of the sparse CSC matrix A by the current B value register and
 * accumulates into C, using scalar SSE loads/stores.
 *
 * i_k          - current column of A; also indexes the b<i_k> register name
 * i_z          - index of the nonzero within column i_k
 * i_row_idx    - CSC row indices of A
 * i_column_idx - CSC column pointers of A
 *
 * NOTE(review): the emitted code references l_n, C, A and b<i_k>, which are
 * assumed to be declared by surrounding generated code. */
LIBXSMM_API_INTERN
void libxsmm_sparse_csc_asparse_innerloop_scalar( libxsmm_generated_code* io_generated_code,
                                                  const libxsmm_gemm_descriptor* i_xgemm_desc,
                                                  const unsigned int i_k,
                                                  const unsigned int i_z,
                                                  const unsigned int* i_row_idx,
                                                  const unsigned int* i_column_idx ) {
  char l_new_code[512];
  int l_max_code_length = 511;
  int l_code_length = 0;

  if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
    /* double precision: scalar load of the C and A elements */
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    __m128d c%u_%u = _mm_load_sd(&C[(l_n*%u)+%u]);\n", i_k, i_z, (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z] );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    __m128d a%u_%u = _mm_load_sd(&A[%u]);\n", i_k, i_z, i_column_idx[i_k] + i_z );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    /* The emitted fma differs by target: with AVX the b register is 256-bit
     * and must be cast down to its low 128 bits first */
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && defined(__AVX__)\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    c%u_%u = _mm_add_sd(c%u_%u, _mm_mul_sd(a%u_%u, _mm256_castpd256_pd128(b%u)));\n", i_k, i_z, i_k, i_z, i_k, i_z, i_k );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#endif\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && !defined(__AVX__)\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    c%u_%u = _mm_add_sd(c%u_%u, _mm_mul_sd(a%u_%u, b%u));\n", i_k, i_z, i_k, i_z, i_k, i_z, i_k );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#endif\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    /* store the updated C element back */
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    _mm_store_sd(&C[(l_n*%u)+%u], c%u_%u);\n", (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z], i_k, i_z );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
  } else {
    /* single precision: same pattern with _ss intrinsics; no AVX cast is
     * emitted on this path */
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    __m128 c%u_%u = _mm_load_ss(&C[(l_n*%u)+%u]);\n", i_k, i_z, (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z] );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    __m128 a%u_%u = _mm_load_ss(&A[%u]);\n", i_k, i_z, i_column_idx[i_k] + i_z );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    c%u_%u = _mm_add_ss(c%u_%u, _mm_mul_ss(a%u_%u, b%u));\n", i_k, i_z, i_k, i_z, i_k, i_z, i_k );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    _mm_store_ss(&C[(l_n*%u)+%u], c%u_%u);\n", (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z], i_k, i_z );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
  }
}
/* Emit C source that updates two consecutive rows of one column of C with the
 * product of two consecutive non-zeros of column i_k of the sparse matrix A
 * and the broadcast B value b<i_k>.  For FP64 both elements move as one
 * 128-bit load/store, with an AVX- and an SSE3-specific multiply emitted
 * behind preprocessor guards; for FP32 the two elements are packed into the
 * low half of an __m128 via double-precision load/store casts. */
LIBXSMM_API_INTERN
void libxsmm_sparse_csc_asparse_innerloop_two_vector( libxsmm_generated_code* io_generated_code,
const libxsmm_gemm_descriptor* i_xgemm_desc,
const unsigned int i_k,
const unsigned int i_z,
const unsigned int* i_row_idx,
const unsigned int* i_column_idx ) {
/* scratch buffer: one generated source line is formatted and appended at a time */
char l_codebuf[512];
int l_bufmax = 511;
int l_written = 0;
if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
/* load the two C elements and the two A elements */
l_written = LIBXSMM_SNPRINTF(l_codebuf, l_bufmax, " __m128d c%u_%u = _mm_loadu_pd(&C[(l_n*%u)+%u]);\n", i_k, i_z, (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z] );
libxsmm_append_code_as_string( io_generated_code, l_codebuf, l_written );
l_written = LIBXSMM_SNPRINTF(l_codebuf, l_bufmax, " __m128d a%u_%u = _mm_loadu_pd(&A[%u]);\n", i_k, i_z, i_column_idx[i_k] + i_z );
libxsmm_append_code_as_string( io_generated_code, l_codebuf, l_written );
/* AVX build: b<i_k> is a __m256d, so use its low 128 bits */
l_written = LIBXSMM_SNPRINTF(l_codebuf, l_bufmax, "#if defined(__SSE3__) && defined(__AVX__)\n");
libxsmm_append_code_as_string( io_generated_code, l_codebuf, l_written );
l_written = LIBXSMM_SNPRINTF(l_codebuf, l_bufmax, " c%u_%u = _mm_add_pd(c%u_%u, _mm_mul_pd(a%u_%u, _mm256_castpd256_pd128(b%u)));\n", i_k, i_z, i_k, i_z, i_k, i_z, i_k );
libxsmm_append_code_as_string( io_generated_code, l_codebuf, l_written );
l_written = LIBXSMM_SNPRINTF(l_codebuf, l_bufmax, "#endif\n");
libxsmm_append_code_as_string( io_generated_code, l_codebuf, l_written );
/* SSE3-only build: b<i_k> is already a __m128d */
l_written = LIBXSMM_SNPRINTF(l_codebuf, l_bufmax, "#if defined(__SSE3__) && !defined(__AVX__)\n");
libxsmm_append_code_as_string( io_generated_code, l_codebuf, l_written );
l_written = LIBXSMM_SNPRINTF(l_codebuf, l_bufmax, " c%u_%u = _mm_add_pd(c%u_%u, _mm_mul_pd(a%u_%u, b%u));\n", i_k, i_z, i_k, i_z, i_k, i_z, i_k );
libxsmm_append_code_as_string( io_generated_code, l_codebuf, l_written );
l_written = LIBXSMM_SNPRINTF(l_codebuf, l_bufmax, "#endif\n");
libxsmm_append_code_as_string( io_generated_code, l_codebuf, l_written );
/* store the accumulated pair back to C */
l_written = LIBXSMM_SNPRINTF(l_codebuf, l_bufmax, " _mm_storeu_pd(&C[(l_n*%u)+%u], c%u_%u);\n", (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z], i_k, i_z );
libxsmm_append_code_as_string( io_generated_code, l_codebuf, l_written );
} else {
/* FP32: move both floats with one 64-bit (double) load, reinterpreted as __m128 */
l_written = LIBXSMM_SNPRINTF(l_codebuf, l_bufmax, " __m128 c%u_%u = _mm_castpd_ps(_mm_load_sd((const double*)&C[(l_n*%u)+%u]));\n", i_k, i_z, (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z] );
libxsmm_append_code_as_string( io_generated_code, l_codebuf, l_written );
l_written = LIBXSMM_SNPRINTF(l_codebuf, l_bufmax, " __m128 a%u_%u = _mm_castpd_ps(_mm_load_sd((const double*)&A[%u]));\n", i_k, i_z, i_column_idx[i_k] + i_z );
libxsmm_append_code_as_string( io_generated_code, l_codebuf, l_written );
l_written = LIBXSMM_SNPRINTF(l_codebuf, l_bufmax, " c%u_%u = _mm_add_ps(c%u_%u, _mm_mul_ps(a%u_%u, b%u));\n", i_k, i_z, i_k, i_z, i_k, i_z, i_k );
libxsmm_append_code_as_string( io_generated_code, l_codebuf, l_written );
/* store the low two lanes back via a 64-bit store */
l_written = LIBXSMM_SNPRINTF(l_codebuf, l_bufmax, " _mm_store_sd((double*)&C[(l_n*%u)+%u], _mm_castps_pd(c%u_%u));\n", (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z], i_k, i_z );
libxsmm_append_code_as_string( io_generated_code, l_codebuf, l_written );
}
}
/* Emit C source that updates four consecutive rows of one column of C with
 * the products of four consecutive non-zeros of column i_k of the sparse
 * matrix A and the broadcast B value b<i_k>.  The caller has verified that
 * the four row indices starting at entry i_z are consecutive.  FP64: one
 * 256-bit AVX operation, or two 128-bit SSE3 operations as fallback (each
 * path guarded by emitted preprocessor conditionals).  FP32: a single
 * 128-bit SSE operation covers all four elements. */
LIBXSMM_API_INTERN
void libxsmm_sparse_csc_asparse_innerloop_four_vector( libxsmm_generated_code* io_generated_code,
const libxsmm_gemm_descriptor* i_xgemm_desc,
const unsigned int i_k,
const unsigned int i_z,
const unsigned int* i_row_idx,
const unsigned int* i_column_idx ) {
/* scratch buffer for one emitted source line at a time */
char l_new_code[512];
/* snprintf limit: buffer size minus one for the terminating NUL */
int l_max_code_length = 511;
int l_code_length = 0;
if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
unsigned int l_i;
/* l_z walks the two 128-bit halves (i_z and i_z+2) in the SSE3 fallback */
unsigned int l_z = i_z;
/* AVX path: one 256-bit load/FMA-like multiply-add/store */
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && defined(__AVX__)\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m256d c%u_%u = _mm256_loadu_pd(&C[(l_n*%u)+%u]);\n", i_k, i_z, (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z] );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m256d a%u_%u = _mm256_loadu_pd(&A[%u]);\n", i_k, i_z, i_column_idx[i_k] + i_z );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " c%u_%u = _mm256_add_pd(c%u_%u, _mm256_mul_pd(a%u_%u, b%u));\n", i_k, i_z, i_k, i_z, i_k, i_z, i_k );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " _mm256_storeu_pd(&C[(l_n*%u)+%u], c%u_%u);\n", (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z], i_k, i_z );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#endif\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
/* SSE3 fallback: two 128-bit halves, entries [i_z,i_z+1] and [i_z+2,i_z+3] */
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && !defined(__AVX__)\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
for ( l_i = 0; l_i < 2; l_i++ ) {
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m128d c%u_%u = _mm_loadu_pd(&C[(l_n*%u)+%u]);\n", i_k, l_z, (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + l_z] );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m128d a%u_%u = _mm_loadu_pd(&A[%u]);\n", i_k, l_z, i_column_idx[i_k] + l_z );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " c%u_%u = _mm_add_pd(c%u_%u, _mm_mul_pd(a%u_%u, b%u));\n", i_k, l_z, i_k, l_z, i_k, l_z, i_k );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " _mm_storeu_pd(&C[(l_n*%u)+%u], c%u_%u);\n", (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + l_z], i_k, l_z );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
/* advance to the second pair */
l_z += 2;
}
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#endif\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
} else {
/* FP32: all four elements fit one __m128 */
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m128 c%u_%u = _mm_loadu_ps(&C[(l_n*%u)+%u]);\n", i_k, i_z, (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z] );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m128 a%u_%u = _mm_loadu_ps(&A[%u]);\n", i_k, i_z, i_column_idx[i_k] + i_z );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " c%u_%u = _mm_add_ps(c%u_%u, _mm_mul_ps(a%u_%u, b%u));\n", i_k, i_z, i_k, i_z, i_k, i_z, i_k );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " _mm_storeu_ps(&C[(l_n*%u)+%u], c%u_%u);\n", (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z], i_k, i_z );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
}
}
/* Generate unrolled C source for a sparse(A, CSC format) times dense(B)
 * update of dense(C).  The emitted code loops over the columns of C (l_n)
 * and fully unrolls over the non-zeros of each column of A, picking 4-wide,
 * 2-wide, or scalar inner loops depending on whether consecutive row indices
 * permit vector loads; a plain C fallback is emitted for builds without
 * SSE3/AVX.  i_arch and i_values are currently unused. */
LIBXSMM_API_INTERN
void libxsmm_generator_spgemm_csc_asparse( libxsmm_generated_code* io_generated_code,
const libxsmm_gemm_descriptor* i_xgemm_desc,
const char* i_arch,
const unsigned int* i_row_idx,
const unsigned int* i_column_idx,
const double* i_values ) {
char l_new_code[512];
int l_max_code_length = 511;
int l_code_length = 0;
unsigned int l_k;
/* flops of one column pass of the generated kernel; scaled by n at the end */
unsigned int l_flop_count = 0;
LIBXSMM_UNUSED(i_arch);
LIBXSMM_UNUSED(i_values);
/* loop over columns in C in generated code, we fully unroll inside each column */
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " unsigned int l_n = 0;\n #pragma nounroll_and_jam\n for ( l_n = 0; l_n < %u; l_n++) {\n", (unsigned int)i_xgemm_desc->n);
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
/* reset the current column in C if needed */
if (0 != (LIBXSMM_GEMM_FLAG_BETA_0 & i_xgemm_desc->flags)) { /* Beta=0 */
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " unsigned int l_m = 0;\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
if ( i_xgemm_desc->m > 1 ) {
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " #pragma simd\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
}
/* zero the column with the literal matching the datatype (0.0 vs 0.0f) */
if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " for ( l_m = 0; l_m < %u; l_m++) {\n C[(l_n*%u)+l_m] = 0.0;\n }\n", (unsigned int)i_xgemm_desc->m, (unsigned int)i_xgemm_desc->ldc);
} else {
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " for ( l_m = 0; l_m < %u; l_m++) {\n C[(l_n*%u)+l_m] = 0.0f;\n }\n", (unsigned int)i_xgemm_desc->m, (unsigned int)i_xgemm_desc->ldc);
}
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
}
assert(0 != i_column_idx);
/* loop over columns in A, rows in B and fully unroll */
for ( l_k = 0; l_k < (unsigned int)i_xgemm_desc->k; l_k++ ) {
unsigned int l_column_elements = i_column_idx[l_k + 1] - i_column_idx[l_k];
unsigned int l_z = 0;
/* everything below until the #else is the intrinsic (SSE3/AVX) path */
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) || defined(__AVX__)\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
if ( l_column_elements > 0 ) {
/* broadcast the B element for this column of A once, per target ISA */
if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && defined(__AVX__)\n __m256d b%u = _mm256_broadcast_sd(&B[(l_n*%u)+%u]);\n#endif\n", l_k, (unsigned int)i_xgemm_desc->ldb, l_k);
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && !defined(__AVX__)\n __m128d b%u = _mm_loaddup_pd(&B[(l_n*%u)+%u]);\n#endif\n", l_k, (unsigned int)i_xgemm_desc->ldb, l_k);
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
} else {
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && defined(__AVX__)\n __m128 b%u = _mm_broadcast_ss(&B[(l_n*%u)+%u]);\n#endif\n", l_k, (unsigned int)i_xgemm_desc->ldb, l_k);
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && !defined(__AVX__)\n __m128 b%u = _mm_load_ss(&B[(l_n*%u)+%u]); b%u = _mm_shuffle_ps(b%u, b%u, 0x00);\n#endif\n", l_k, (unsigned int)i_xgemm_desc->ldb, l_k, l_k, l_k, l_k);
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
}
}
/* loop over the columns of A and look for vectorization potential */
for ( l_z = 0; l_z < l_column_elements; l_z++ ) {
assert(0 != i_row_idx);
/* 4 element vector might be possible */
if ( (l_z < (l_column_elements - 3)) && (l_column_elements > 3) ) {
/* check for 256bit vector instruction: four consecutive row indices, all in range */
if ((i_row_idx[i_column_idx[l_k] + l_z] + 1 == i_row_idx[i_column_idx[l_k] + l_z + 1]) &&
(i_row_idx[i_column_idx[l_k] + l_z] + 2 == i_row_idx[i_column_idx[l_k] + l_z + 2]) &&
(i_row_idx[i_column_idx[l_k] + l_z] + 3 == i_row_idx[i_column_idx[l_k] + l_z + 3]) &&
(i_row_idx[i_column_idx[l_k] + l_z + 3] < (unsigned int)i_xgemm_desc->m)) {
libxsmm_sparse_csc_asparse_innerloop_four_vector(io_generated_code, i_xgemm_desc, l_k, l_z, i_row_idx, i_column_idx);
/* three extra entries consumed by the 4-wide kernel */
l_z += 3;
/* check for 128bit vector instruction */
} else if ((i_row_idx[i_column_idx[l_k] + l_z] + 1 == i_row_idx[i_column_idx[l_k] + l_z + 1]) &&
(i_row_idx[i_column_idx[l_k] + l_z + 1] < (unsigned int)i_xgemm_desc->m) ) {
libxsmm_sparse_csc_asparse_innerloop_two_vector(io_generated_code, i_xgemm_desc, l_k, l_z, i_row_idx, i_column_idx);
l_z++;
/* scalar instruction */
} else {
if ( (i_row_idx[i_column_idx[l_k] + l_z] < (unsigned int)i_xgemm_desc->m) ) {
libxsmm_sparse_csc_asparse_innerloop_scalar(io_generated_code, i_xgemm_desc, l_k, l_z, i_row_idx, i_column_idx);
}
}
/* 2 element vector might be possible */
} else if ( (l_z < (l_column_elements - 1)) && (l_column_elements > 1)) {
/* check for 128bit vector instruction */
if ((i_row_idx[i_column_idx[l_k] + l_z] + 1 == i_row_idx[i_column_idx[l_k] + l_z + 1]) &&
(i_row_idx[i_column_idx[l_k] + l_z + 1] < (unsigned int)i_xgemm_desc->m) ) {
libxsmm_sparse_csc_asparse_innerloop_two_vector(io_generated_code, i_xgemm_desc, l_k, l_z, i_row_idx, i_column_idx);
l_z++;
/* scalar instruction */
} else {
if ( (i_row_idx[i_column_idx[l_k] + l_z] < (unsigned int)i_xgemm_desc->m) ) {
libxsmm_sparse_csc_asparse_innerloop_scalar(io_generated_code, i_xgemm_desc, l_k, l_z, i_row_idx, i_column_idx);
}
}
/* scalar anyways */
} else {
if ( (i_row_idx[i_column_idx[l_k] + l_z] < (unsigned int)i_xgemm_desc->m) ) {
libxsmm_sparse_csc_asparse_innerloop_scalar(io_generated_code, i_xgemm_desc, l_k, l_z, i_row_idx, i_column_idx);
}
}
}
/* C fallback code */
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#else\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
/* loop over the columns of A; the flop count is tallied only here, which
 * suffices since the intrinsic path performs the same multiply-adds */
for ( l_z = 0; l_z < l_column_elements; l_z++ ) {
if ( (i_row_idx[i_column_idx[l_k] + l_z] < (unsigned int)i_xgemm_desc->m) ) {
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " C[(l_n*%u)+%u] += A[%u] * B[(l_n*%u)+%u];\n", (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[l_k] + l_z], i_column_idx[l_k] + l_z, (unsigned int)i_xgemm_desc->ldb, l_k );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_flop_count += 2;
}
}
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#endif\n\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
}
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " }\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
/* add flop counter */
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "\n#ifndef NDEBUG\n#ifdef _OPENMP\n#pragma omp atomic\n#endif\nlibxsmm_num_total_flops += %u;\n#endif\n", l_flop_count * i_xgemm_desc->n);
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
}
|
PR44893.c | // RUN: %clang -fopenmp -O -g -x c %s -S -disable-output -o %t
// Do not crash ;)
// Regression test for PR44893 (see the RUN line above): an OpenMP critical
// region whose body is just an empty statement, compiled with -O and -g,
// must not crash the compiler.
void foo()
{
#pragma omp critical
;
}
// Calls foo twice; presumably the duplicate call sites (and their inlining
// under -O) are what triggered the original PR44893 crash -- confirm against
// the bug report if this test is ever reduced further.
void bar()
{
foo();
foo();
}
|
GB_apply_op.c | //------------------------------------------------------------------------------
// GB_apply_op: typecast and apply a unary/binary/idxunop operator to an array
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Cx = op (A)
// Cx and A->x may be aliased.
// This function is CSR/CSC agnostic. For positional ops, A is treated as if
// it is in CSC format. The caller has already modified the op if A is in CSR
// format.
// Template/GB_positional_op_ijp can return GrB_OUT_OF_MEMORY.
// Otherwise, this function only returns GrB_SUCCESS.
#include "GB_apply.h"
#include "GB_binop.h"
#include "GB_ek_slice.h"
#include "GB_unused.h"
#ifndef GBCOMPACT
#include "GB_unop__include.h"
#include "GB_binop__include.h"
#endif
#define GB_FREE_ALL \
{ \
GB_WERK_POP (A_ek_slicing, int64_t) ; \
}
GrB_Info GB_apply_op // apply a unary op, idxunop, or binop, Cx = op (A)
(
GB_void *Cx, // output array
const GrB_Type ctype, // type of C
const GB_iso_code C_code_iso, // C non-iso, or code to compute C iso value
const GB_Operator op, // unary/index-unary/binop to apply
const GrB_Scalar scalar, // scalar to bind to binary operator
bool binop_bind1st, // if true, C=binop(s,A), else C=binop(A,s)
bool flipij, // if true, flip i,j for user idxunop
const GrB_Matrix A, // input matrix
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (Cx != NULL) ;
ASSERT_MATRIX_OK (A, "A input for GB_apply_op", GB0) ;
ASSERT (GB_JUMBLED_OK (A)) ; // A can be jumbled
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
ASSERT (GB_IMPLIES (op != NULL, ctype == op->ztype)) ;
ASSERT_SCALAR_OK_OR_NULL (scalar, "scalar for GB_apply_op", GB0) ;
//--------------------------------------------------------------------------
// get A
//--------------------------------------------------------------------------
// A->x is not const since the operator might be applied in-place, if
// Cx is aliased to A->x.
GB_void *Ax = (GB_void *) A->x ; // A->x has type A->type
const int8_t *Ab = A->b ; // only if A is bitmap
const GrB_Type Atype = A->type ; // type of A->x
const int64_t anz = GB_nnz_held (A) ; // size of A->x and Cx
//--------------------------------------------------------------------------
// determine the maximum number of threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
//--------------------------------------------------------------------------
// get the operator
//--------------------------------------------------------------------------
GB_Opcode opcode ;
bool op_is_unop = false ;
bool op_is_binop = false ;
if (op != NULL)
{
opcode = op->opcode ;
op_is_unop = GB_IS_UNARYOP_CODE (opcode) ;
op_is_binop = GB_IS_BINARYOP_CODE (opcode) ;
}
else
{
// C is iso, with no operator to apply; just call GB_iso_unop below.
ASSERT (C_code_iso == GB_ISO_1 || // C iso value is 1
C_code_iso == GB_ISO_S || // C iso value is the scalar
C_code_iso == GB_ISO_A) ; // C iso value is the iso value of A
opcode = GB_NOP_code ;
}
//--------------------------------------------------------------------------
// apply the operator
//--------------------------------------------------------------------------
if (GB_OPCODE_IS_POSITIONAL (opcode))
{
//----------------------------------------------------------------------
// built-in positional unary, index_unary, or binary operator
//----------------------------------------------------------------------
// positional ops produce INT64, INT32, or (for the select-style
// idxunops handled in the final branch below) BOOL results
bool is64 = (op->ztype == GrB_INT64) ;
bool is32 = (op->ztype == GrB_INT32) ;
ASSERT_OP_OK (op, "positional unop/idxunop/binop: GB_apply_op", GB0) ;
// get A and C
const int64_t *restrict Ah = A->h ;
const int64_t *restrict Ap = A->p ;
const int64_t *restrict Ai = A->i ;
int64_t anvec = A->nvec ;
int64_t avlen = A->vlen ;
int64_t avdim = A->vdim ;
//----------------------------------------------------------------------
// determine number of threads to use
//----------------------------------------------------------------------
int nthreads = GB_nthreads (anz + anvec, chunk, nthreads_max) ;
// several tasks per thread when parallel, presumably for load balance
int ntasks = (nthreads == 1) ? 1 : (32 * nthreads) ;
//----------------------------------------------------------------------
// Cx = positional_op (A)
//----------------------------------------------------------------------
int64_t thunk = GB_positional_offset (opcode, scalar) ;
// GB_positional_op_ijp allocates a set of tasks, which can possibly
// fail if out of memory.
if (is64)
{
//------------------------------------------------------------------
// int64 Cx = positional_op (A)
//------------------------------------------------------------------
int64_t *restrict Cz = (int64_t *) Cx ;
switch (opcode)
{
case GB_POSITIONI_unop_code : // z = position_i(A(i,j)) == i
case GB_POSITIONI1_unop_code : // z = position_i1(A(i,j)) == i+1
case GB_FIRSTI_binop_code : // z = first_i(A(i,j),y) == i
case GB_FIRSTI1_binop_code : // z = first_i1(A(i,j),y) == i+1
case GB_SECONDI_binop_code : // z = second_i(x,A(i,j)) == i
case GB_SECONDI1_binop_code : // z = second_i1(x,A(i,j)) == i+1
case GB_ROWINDEX_idxunop_code : // z = i+thunk
#define GB_APPLY(p) \
Cz [p] = (i + thunk) ;
#include "GB_positional_op_ip.c"
return (GrB_SUCCESS) ;
case GB_POSITIONJ_unop_code : // z = position_j(A(i,j)) == j
case GB_POSITIONJ1_unop_code : // z = position_j1(A(i,j)) == j+1
case GB_FIRSTJ_binop_code : // z = first_j(A(i,j),y) == j
case GB_FIRSTJ1_binop_code : // z = first_j1(A(i,j),y) == j+1
case GB_SECONDJ_binop_code : // z = second_j(x,A(i,j)) == j
case GB_SECONDJ1_binop_code : // z = second_j1(x,A(i,j)) == j+1
case GB_COLINDEX_idxunop_code : // z = j+thunk
#define GB_APPLY(p) \
Cz [p] = (j + thunk) ;
#include "GB_positional_op_ijp.c"
return (GrB_SUCCESS) ;
case GB_DIAGINDEX_idxunop_code : // z = (j-(i+thunk))
#define GB_APPLY(p) \
int64_t i = GBI (Ai, p, avlen) ; \
Cz [p] = (j - (i+thunk)) ;
#include "GB_positional_op_ijp.c"
return (GrB_SUCCESS) ;
case GB_FLIPDIAGINDEX_idxunop_code : // z = (i-(j+thunk))
#define GB_APPLY(p) \
int64_t i = GBI (Ai, p, avlen) ; \
Cz [p] = (i - (j+thunk)) ;
#include "GB_positional_op_ijp.c"
return (GrB_SUCCESS) ;
default: ;
}
}
else if (is32)
{
//------------------------------------------------------------------
// int32 Cx = positional_op (A)
//------------------------------------------------------------------
int32_t *restrict Cz = (int32_t *) Cx ;
switch (opcode)
{
case GB_POSITIONI_unop_code : // z = position_i(A(i,j)) == i
case GB_POSITIONI1_unop_code : // z = position_i1(A(i,j)) == i+1
case GB_FIRSTI_binop_code : // z = first_i(A(i,j),y) == i
case GB_FIRSTI1_binop_code : // z = first_i1(A(i,j),y) == i+1
case GB_SECONDI_binop_code : // z = second_i(x,A(i,j)) == i
case GB_SECONDI1_binop_code : // z = second_i1(x,A(i,j)) == i+1
case GB_ROWINDEX_idxunop_code : // z = i+thunk
#define GB_APPLY(p) \
Cz [p] = (int32_t) (i + thunk) ;
#include "GB_positional_op_ip.c"
return (GrB_SUCCESS) ;
case GB_POSITIONJ_unop_code : // z = position_j(A(i,j)) == j
case GB_POSITIONJ1_unop_code : // z = position_j1(A(i,j)) == j+1
case GB_FIRSTJ_binop_code : // z = first_j(A(i,j),y) == j
case GB_FIRSTJ1_binop_code : // z = first_j1(A(i,j),y) == j+1
case GB_SECONDJ_binop_code : // z = second_j(x,A(i,j)) == j
case GB_SECONDJ1_binop_code : // z = second_j1(x,A(i,j)) == j+1
case GB_COLINDEX_idxunop_code : // z = j+thunk
#define GB_APPLY(p) \
Cz [p] = (int32_t) (j + thunk) ;
#include "GB_positional_op_ijp.c"
return (GrB_SUCCESS) ;
case GB_DIAGINDEX_idxunop_code : // z = (j-(i+thunk))
#define GB_APPLY(p) \
int64_t i = GBI (Ai, p, avlen) ; \
Cz [p] = (int32_t) (j - (i+thunk)) ;
#include "GB_positional_op_ijp.c"
return (GrB_SUCCESS) ;
case GB_FLIPDIAGINDEX_idxunop_code : // z = (i-(j+thunk))
#define GB_APPLY(p) \
int64_t i = GBI (Ai, p, avlen) ; \
Cz [p] = (int32_t) (i - (j+thunk)) ;
#include "GB_positional_op_ijp.c"
return (GrB_SUCCESS) ;
default: ;
}
}
else
{
//------------------------------------------------------------------
// bool Cx = positional_op (A)
//------------------------------------------------------------------
ASSERT (op->ztype == GrB_BOOL) ;
bool *restrict Cz = (bool *) Cx ;
switch (opcode)
{
case GB_TRIL_idxunop_code : // z = (j <= (i+thunk))
#define GB_APPLY(p) \
int64_t i = GBI (Ai, p, avlen) ; \
Cz [p] = (j <= (i + thunk)) ;
#include "GB_positional_op_ijp.c"
return (GrB_SUCCESS) ;
case GB_TRIU_idxunop_code : // z = (j >= (i+thunk))
#define GB_APPLY(p) \
int64_t i = GBI (Ai, p, avlen) ; \
Cz [p] = (j >= (i + thunk)) ;
#include "GB_positional_op_ijp.c"
return (GrB_SUCCESS) ;
case GB_DIAG_idxunop_code : // z = (j == (i+thunk))
#define GB_APPLY(p) \
int64_t i = GBI (Ai, p, avlen) ; \
Cz [p] = (j == (i + thunk)) ;
#include "GB_positional_op_ijp.c"
return (GrB_SUCCESS) ;
case GB_OFFDIAG_idxunop_code : // z = (j != (i+thunk))
#define GB_APPLY(p) \
int64_t i = GBI (Ai, p, avlen) ; \
Cz [p] = (j != (i + thunk)) ;
#include "GB_positional_op_ijp.c"
return (GrB_SUCCESS) ;
case GB_COLLE_idxunop_code : // z = (j <= thunk)
#define GB_APPLY(p) \
Cz [p] = (j <= thunk) ;
#include "GB_positional_op_ijp.c"
return (GrB_SUCCESS) ;
case GB_COLGT_idxunop_code : // z = (j > thunk)
#define GB_APPLY(p) \
Cz [p] = (j > thunk) ;
#include "GB_positional_op_ijp.c"
return (GrB_SUCCESS) ;
case GB_ROWLE_idxunop_code : // z = (i <= thunk)
#define GB_APPLY(p) \
Cz [p] = (i <= thunk) ;
#include "GB_positional_op_ip.c"
return (GrB_SUCCESS) ;
case GB_ROWGT_idxunop_code : // z = (i > thunk)
#define GB_APPLY(p) \
Cz [p] = (i > thunk) ;
#include "GB_positional_op_ip.c"
return (GrB_SUCCESS) ;
default: ;
}
}
}
else if (C_code_iso != GB_NON_ISO)
{
//----------------------------------------------------------------------
// apply the unary or binary operator to the iso value
//----------------------------------------------------------------------
// if C is iso, this function takes O(1) time
GBURBLE ("(iso apply) ") ;
ASSERT_MATRIX_OK (A, "A passing to GB_iso_unop", GB0) ;
if (anz > 0)
{
// Cx [0] = unop (A), binop (scalar,A), or binop (A,scalar)
GB_iso_unop (Cx, ctype, C_code_iso, op, A, scalar) ;
}
}
else if (op_is_unop)
{
//----------------------------------------------------------------------
// apply the unary operator to all entries
//----------------------------------------------------------------------
ASSERT_UNARYOP_OK (op, "unop for GB_apply_op", GB0) ;
ASSERT (!A->iso) ;
// determine number of threads to use
int nthreads = GB_nthreads (anz, chunk, nthreads_max) ;
#ifndef GBCOMPACT
if (Atype == op->xtype || opcode == GB_IDENTITY_unop_code)
{
// The switch factory is used if the op is IDENTITY, or if no
// typecasting is being done. IDENTITY operator can do arbitrary
// typecasting (it is not used if no typecasting is done).
//------------------------------------------------------------------
// define the worker for the switch factory
//------------------------------------------------------------------
#define GB_unop_apply(unop,zname,aname) \
GB (_unop_apply_ ## unop ## zname ## aname)
#define GB_WORKER(unop,zname,ztype,aname,atype) \
{ \
if (GB_unop_apply (unop,zname,aname) ((ztype *) Cx, \
(const atype *) Ax, Ab, anz, nthreads) \
== GrB_SUCCESS) return (GrB_SUCCESS) ; \
} \
break ;
//------------------------------------------------------------------
// launch the switch factory
//------------------------------------------------------------------
#include "GB_unop_factory.c"
}
#endif
//----------------------------------------------------------------------
// generic worker: typecast and apply a unary operator
//----------------------------------------------------------------------
GB_BURBLE_N (anz, "(generic apply: %s) ", op->name) ;
size_t asize = Atype->size ;
size_t zsize = op->ztype->size ;
size_t xsize = op->xtype->size ;
GB_Type_code acode = Atype->code ;
GB_Type_code xcode = op->xtype->code ;
GB_cast_function cast_A_to_X = GB_cast_factory (xcode, acode) ;
GxB_unary_function fop = op->unop_function ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in a bitmap matrix
if (!GBB (Ab, p)) continue ;
// xwork = (xtype) Ax [p]
GB_void xwork [GB_VLA(xsize)] ;
cast_A_to_X (xwork, Ax +(p)*asize, asize) ;
// Cx [p] = fop (xwork)
fop (Cx +(p*zsize), xwork) ;
}
}
else if (op_is_binop)
{
//----------------------------------------------------------------------
// apply a binary operator (bound to a scalar)
//----------------------------------------------------------------------
ASSERT_BINARYOP_OK (op, "standard binop for GB_apply_op", GB0) ;
ASSERT_SCALAR_OK (scalar, "scalar for GB_apply_op", GB0) ;
GB_Type_code xcode, ycode, zcode ;
// FIRST/SECOND/PAIR/ANY bound to a scalar reduce to other cases and
// must have been handled by the caller
ASSERT (opcode != GB_FIRST_binop_code) ;
ASSERT (opcode != GB_SECOND_binop_code) ;
ASSERT (opcode != GB_PAIR_binop_code) ;
ASSERT (opcode != GB_ANY_binop_code) ;
size_t asize = Atype->size ;
size_t ssize = scalar->type->size ;
size_t zsize = op->ztype->size ;
size_t xsize = op->xtype->size ;
size_t ysize = op->ytype->size ;
GB_Type_code scode = scalar->type->code ;
xcode = op->xtype->code ;
ycode = op->ytype->code ;
// typecast the scalar to the operator input
// (x input if bound first, y input if bound second)
size_t ssize_cast ;
GB_Type_code scode_cast ;
if (binop_bind1st)
{
ssize_cast = xsize ;
scode_cast = xcode ;
}
else
{
ssize_cast = ysize ;
scode_cast = ycode ;
}
GB_void swork [GB_VLA(ssize_cast)] ;
GB_void *scalarx = (GB_void *) scalar->x ;
if (scode_cast != scode)
{
// typecast the scalar to the operator input, in swork
GB_cast_function cast_s = GB_cast_factory (scode_cast, scode) ;
cast_s (swork, scalar->x, ssize) ;
scalarx = swork ;
}
// determine number of threads to use
int nthreads = GB_nthreads (anz, chunk, nthreads_max) ;
#ifndef GBCOMPACT
if (binop_bind1st)
{
//------------------------------------------------------------------
// z = binop (scalar,Ax)
//------------------------------------------------------------------
if (GB_binop_builtin (op->xtype, false, Atype, false,
(GrB_BinaryOp) op, false, &opcode, &xcode, &ycode, &zcode))
{
//--------------------------------------------------------------
// define the worker for the switch factory
//--------------------------------------------------------------
#define GB_bind1st(binop,xname) GB (_bind1st_ ## binop ## xname)
#define GB_BINOP_WORKER(binop,xname) \
{ \
if (GB_bind1st (binop, xname) (Cx, scalarx, Ax, \
Ab, anz, nthreads) == GrB_SUCCESS) \
return (GrB_SUCCESS) ; \
} \
break ;
//--------------------------------------------------------------
// launch the switch factory
//--------------------------------------------------------------
#define GB_NO_FIRST
#define GB_NO_SECOND
#define GB_NO_PAIR
#include "GB_binop_factory.c"
}
}
else
{
//------------------------------------------------------------------
// z = binop (Ax,scalar)
//------------------------------------------------------------------
if (GB_binop_builtin (Atype, false, op->ytype, false,
(GrB_BinaryOp) op, false, &opcode, &xcode, &ycode, &zcode))
{
//--------------------------------------------------------------
// define the worker for the switch factory
//--------------------------------------------------------------
#define GB_bind2nd(binop,xname) GB (_bind2nd_ ## binop ## xname)
#undef GB_BINOP_WORKER
#define GB_BINOP_WORKER(binop,xname) \
{ \
if (GB_bind2nd (binop, xname) (Cx, Ax, scalarx, \
Ab, anz, nthreads) == GrB_SUCCESS) \
return (GrB_SUCCESS) ; \
} \
break ;
//--------------------------------------------------------------
// launch the switch factory
//--------------------------------------------------------------
#define GB_NO_FIRST
#define GB_NO_SECOND
#define GB_NO_PAIR
#include "GB_binop_factory.c"
}
}
#endif
//----------------------------------------------------------------------
// generic worker: typecast and apply a binary operator
//----------------------------------------------------------------------
GB_BURBLE_N (anz, "(generic apply: %s) ", op->name) ;
GB_Type_code acode = Atype->code ;
GxB_binary_function fop = op->binop_function ;
ASSERT (!A->iso) ;
if (binop_bind1st)
{
// Cx = binop (scalar,Ax)
GB_cast_function cast_A_to_Y = GB_cast_factory (ycode, acode) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
// ywork = (ytype) Ax [p]
GB_void ywork [GB_VLA(ysize)] ;
cast_A_to_Y (ywork, Ax +(p)*asize, asize) ;
// Cx [p] = fop (scalarx, ywork)
fop (Cx +((p)*zsize), scalarx, ywork) ;
}
}
else
{
// Cx = binop (Ax,scalar)
GB_cast_function cast_A_to_X = GB_cast_factory (xcode, acode) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
// xwork = (xtype) Ax [p]
GB_void xwork [GB_VLA(xsize)] ;
cast_A_to_X (xwork, Ax +(p)*asize, asize) ;
// Cx [p] = fop (xwork, scalarx)
fop (Cx +(p*zsize), xwork, scalarx) ;
}
}
}
else
{
//----------------------------------------------------------------------
// apply a user-defined index_unary op
//----------------------------------------------------------------------
// All valued GrB_IndexUnaryOps (GrB_VALUE*) have already been renamed
// to their corresponding binary op (GrB_VALUEEQ_FP32 became
// GrB_EQ_FP32, for example). The only remaining index unary ops are
// positional, and user-defined. Positional ops have been handled
// above, so only user-defined index unary ops are left.
// get A and C
const int64_t *restrict Ah = A->h ;
const int64_t *restrict Ap = A->p ;
const int64_t *restrict Ai = A->i ;
int64_t anvec = A->nvec ;
int64_t avlen = A->vlen ;
int64_t avdim = A->vdim ;
ASSERT (opcode == GB_USER_idxunop_code) ;
GxB_index_unary_function fop = op->idxunop_function ;
size_t asize = Atype->size ;
size_t ssize = scalar->type->size ;
size_t zsize = op->ztype->size ;
size_t xsize = op->xtype->size ;
size_t ysize = op->ytype->size ;
GB_Type_code scode = scalar->type->code ;
GB_Type_code acode = Atype->code ;
GB_Type_code xcode = op->xtype->code ;
GB_Type_code ycode = op->ytype->code ;
GB_cast_function cast_A_to_X = GB_cast_factory (xcode, acode) ;
// typecast the scalar (the op's "thunk" input) to the y input type
GB_void ywork [GB_VLA(ysize)] ;
GB_void *ythunk = (GB_void *) scalar->x ;
if (ycode != scode)
{
// typecast the scalar to the operator input, in ywork
GB_cast_function cast_s = GB_cast_factory (ycode, scode) ;
cast_s (ywork, scalar->x, ssize) ;
ythunk = ywork ;
}
// per-entry body expanded by the ijp template: cast A(i,j), then call
// the user function with (i,j) swapped when flipij is set
#define GB_APPLY(p) \
if (!GBB (Ab, p)) continue ; \
int64_t i = GBI (Ai, p, avlen) ; \
GB_void xwork [GB_VLA(xsize)] ; \
cast_A_to_X (xwork, Ax +(p)*asize, asize) ; \
fop (Cx +(p*zsize), xwork, \
flipij ? j : i, flipij ? i : j, ythunk) ;
#include "GB_positional_op_ijp.c"
}
//--------------------------------------------------------------------------
// return result
//--------------------------------------------------------------------------
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
}
|
GB_unaryop__abs_uint32_bool.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_uint32_bool
// op(A') function: GB_tran__abs_uint32_bool
// C type: uint32_t
// A type: bool
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
bool
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
uint32_t z = (uint32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_UINT32 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply ABS to a bool array, typecast to uint32_t.
// Since the input is bool, the cast already yields 0 or 1 and ABS is the
// identity (GB_OP above is a plain assignment).
GrB_Info GB_unop__abs_uint32_bool
(
    uint32_t *restrict Cx,      // output array, anz entries
    const bool *restrict Ax,    // input array, anz entries
    int64_t anz,                // number of entries to process
    int nthreads                // number of OpenMP threads to use
)
{
#if GB_DISABLE
    // this kernel was disabled at compile time (see GB_DISABLE above);
    // the caller falls back to the generic worker
    return (GrB_NO_VALUE) ;
#else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        // Cx [p] = (uint32_t) Ax [p], via GB_CAST_OP defined above
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast bool->uint32_t, and apply ABS.
// The actual loop lives in GB_unaryop_transpose.c, specialized by the GB_*
// macros defined earlier in this file.
GrB_Info GB_tran__abs_uint32_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,   // per-slice row-count workspace
    GBI_single_iterator Iter,       // iterator over the vectors of A
    const int64_t *restrict A_slice,
    int naslice                     // number of slices of A
)
{
#if GB_DISABLE
    // kernel disabled at compile time; caller uses the generic worker
    return (GrB_NO_VALUE) ;
#else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__bor_uint64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bor_uint64)
// A.*B function (eWiseMult): GB (_AemultB_08__bor_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__bor_uint64)
// A.*B function (eWiseMult): GB (_AemultB_04__bor_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bor_uint64)
// A*D function (colscale): GB (_AxD__bor_uint64)
// D*A function (rowscale): GB (_DxB__bor_uint64)
// C+=B function (dense accum): GB (_Cdense_accumB__bor_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__bor_uint64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bor_uint64)
// C=scalar+B GB (_bind1st__bor_uint64)
// C=scalar+B' GB (_bind1st_tran__bor_uint64)
// C=A+scalar GB (_bind2nd__bor_uint64)
// C=A'+scalar GB (_bind2nd_tran__bor_uint64)
// C type: uint64_t
// A type: uint64_t
// B,b type: uint64_t
// BinaryOp: cij = (aij) | (bij)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x) | (y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BOR || GxB_NO_UINT64 || GxB_NO_BOR_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C += A+B (all dense) is not generated for the BOR operator: the dense
// ewise3-accum kernel exists only for MIN, MAX, PLUS, MINUS, RMINUS, TIMES,
// DIV, and RDIV, hence the "#if 0" and the "(none)" placeholder name.
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense, no accumulator; z = x|y.
// The loop is in GB_dense_ewise3_noaccum_template.c.
GrB_Info GB (_Cdense_ewise3_noaccum__bor_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
#if GB_DISABLE
    // kernel disabled at compile time (see GB_DISABLE above)
    return (GrB_NO_VALUE) ;
#else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C with the BOR
// accumulator, via the subassign-23 template.
GrB_Info GB (_Cdense_accumB__bor_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C with the BOR
// accumulator, via the subassign-22 template.
// Cleanup: the template block already ends with "return (GrB_SUCCESS)", so
// the second, unreachable return that used to follow it has been removed.
GrB_Info GB (_Cdense_accumb__bor_uint64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,     // points to the scalar b, of type uint64_t
    const int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    {
        // get the scalar b for C += b, of type uint64_t
        uint64_t bwork = (*((uint64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, with z = x|y
// as the "multiply"; loop in GB_AxB_colscale_template.c.
GrB_Info GB (_AxD__bor_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, with z = x|y as
// the "multiply"; loop in GB_AxB_rowscale_template.c.
GrB_Info GB (_DxB__bor_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B, with z = x|y as the "add".
// The numerical work is in GB_add_template.c; the ek-slicing workspaces
// declared here are released by GB_FREE_WORK.
GrB_Info GB (_AaddB__bor_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C = A.*B with optional (possibly complemented)
// mask M, where C is sparse or hypersparse; z = x|y.
GrB_Info GB (_AemultB_08__bor_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full; z = x|y.
GrB_Info GB (_AemultB_02__bor_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
#else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    // (For BOR, GB_BINOP_FLIP is 0 above, so only this branch is compiled.)
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
#endif
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B where M is sparse/hyper and both A
// and B are bitmap/full; z = x|y.
GrB_Info GB (_AemultB_04__bor_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B, C<M> = A.*B, or C<!M> = A.*B where C is bitmap;
// z = x|y.  Loop in GB_bitmap_emult_template.c.
GrB_Info GB (_AemultB_bitmap__bor_uint64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x | Bx [p] for every entry present in B (scalar bound to the
// first operand).  Entries absent from the bitmap Bb are left untouched.
GrB_Info GB (_bind1st__bor_uint64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    uint64_t x = (*((uint64_t *) x_input)) ;
    uint64_t *Bx = (uint64_t *) Bx_input ;
    uint64_t *Cx = (uint64_t *) Cx_output ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (GBB (Bb, p))
        {
            uint64_t bij = GBX (Bx, p, false) ;
            Cx [p] = (x) | (bij) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] | y for every entry present in A (scalar bound to the
// second operand).  Entries absent from the bitmap Ab are left untouched.
GrB_Info GB (_bind2nd__bor_uint64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    uint64_t y = (*((uint64_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (GBB (Ab, p))
        {
            uint64_t aij = GBX (Ax, p, false) ;
            Cx [p] = (aij) | (y) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint64_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (x) | (aij) ;                     \
}

// C = op (x, A'): transpose A and apply the operator with the scalar x
// bound to the first argument.  The loop is in GB_unop_transpose.c, driven
// by the GB_CAST_OP macro redefined just above.
GrB_Info GB (_bind1st_tran__bor_uint64)
(
    GrB_Matrix C,
    const GB_void *x_input,     // scalar x, of type uint64_t
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint64_t
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    uint64_t x = (*((const uint64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
    // restore GB_ATYPE for subsequent code (generated boilerplate; both
    // definitions happen to be identical for this type combination)
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint64_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (aij) | (y) ;                     \
}

// C = op (A', y): transpose A and apply the operator with the scalar y
// bound to the second argument.  The loop is in GB_unop_transpose.c, driven
// by the GB_CAST_OP macro redefined just above.
GrB_Info GB (_bind2nd_tran__bor_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,     // scalar y, of type uint64_t
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    uint64_t y = (*((const uint64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}
#endif
|
diagsm_x_dia_n_col.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include <memory.h>
// Diagonal solve for a DIA-format matrix: y[c,r] = alpha * x[c,r] / diag[r]
// for each of the 'columns' right-hand sides, where diag is the main
// diagonal of A.  ldx/ldy are the leading dimensions of the dense
// multi-vectors x and y (column layout per the _col suffix -- confirm
// index2 semantics against its definition).
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_DIA *A, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number *y, const ALPHA_INT ldy)
{
    // Gather the main diagonal of A into a dense vector.
    // NOTE(review): VLA of A->rows elements on the stack -- may overflow
    // for very large matrices; consider heap allocation.
    ALPHA_Number diag[A->rows];
    memset(diag, '\0', A->rows * sizeof(ALPHA_Number));
    int num_thread = alpha_get_thread_num();
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
    for (ALPHA_INT i = 0; i < A->ndiag; i++)
    {
        // distance == 0 identifies the main diagonal (presumably unique in
        // DIA format, so the parallel writes do not conflict -- confirm)
        if(A->distance[i] == 0)
        {
            for (ALPHA_INT r = 0; r < A->rows; r++)
            {
                diag[r] = A->values[i * A->lval + r];
            }
        }
    }
    // y = alpha * x / diag, element-wise, one column per iteration.
    // NOTE(review): if A has no main diagonal (or a stored zero on it),
    // this divides by zero -- confirm callers guarantee a nonzero diagonal.
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
    for (ALPHA_INT c = 0; c < columns; ++c)
    {
        for (ALPHA_INT r = 0; r < A->rows; ++r)
        {
            ALPHA_Number t;
            alpha_setzero(t);
            alpha_mul(t, alpha, x[index2(c, r, ldx)]);
            alpha_div(y[index2(c, r, ldy)], t, diag[r]);
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
omp_tls_copy.c | #include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>
struct tls_data {
int cnt;
char data[32];
} tls;
#pragma omp threadprivate(tls)
// Demonstrates OpenMP threadprivate data with copyin (broadcast the
// master's copy into each thread on parallel entry) and copyprivate
// (broadcast one thread's copy to the rest at the end of a single region).
int main()
{
    // initialize the master thread's private copy of 'tls'
    tls.cnt = 5;
    strcpy(tls.data, "12345");
    printf("[Master] tls(%.*s)\n", tls.cnt, tls.data);
    omp_set_num_threads(4);
    // copyin(tls): every thread starts with the master's "12345" value
    #pragma omp parallel copyin(tls)
    {
        // first pair of sections: all threads should still print "12345"
        #pragma omp sections
        {
            #pragma omp section
            {
                printf("[T:%d] 1:tls(%.*s)\n",
                    omp_get_thread_num(), tls.cnt, tls.data);
                sleep(1);
            }
            #pragma omp section
            {
                printf("[T:%d] 1:tls(%.*s)\n",
                    omp_get_thread_num(), tls.cnt, tls.data);
                sleep(1);
            }
        } /* sections */
        // one thread rewrites tls; copyprivate broadcasts it to the whole
        // team at the implicit barrier of the single region
        #pragma omp single copyprivate(tls)
        {
            tls.cnt = 3;
            strcpy(tls.data, "xyz");
            printf("[T:%d] single copyprivate(%.*s)\n",
                omp_get_thread_num(), tls.cnt, tls.data);
        } /* single */
        // second pair of sections: all threads should now print "xyz"
        #pragma omp sections
        {
            #pragma omp section
            {
                printf("[T:%d] 2:tls(%.*s)\n",
                    omp_get_thread_num(), tls.cnt, tls.data);
                sleep(1);
            }
            #pragma omp section
            {
                printf("[T:%d] 2:tls(%.*s)\n",
                    omp_get_thread_num(), tls.cnt, tls.data);
                sleep(1);
            }
        } /* sections */
    } /* parallel */
    // the master participated in the copyprivate broadcast, so its copy
    // should now hold "xyz" as well
    printf("[Master] tls(%.*s)\n", tls.cnt, tls.data);
    return 0;
}
|
filter.c | /* Copyright 2015-2017. The Regents of the University of California.
* Copyright 2016-2017. Martin Uecker.
* All rights reserved. Use of this source code is governed by
* a BSD-style license which can be found in the LICENSE file.
*
* Authors:
* 2012-2017 Martin Uecker <martin.uecker@med.uni-goettingen.de>
* 2017 Jon Tamir <jtamir@eecs.berkeley.edu>
*/
#include <assert.h>
#include <stdlib.h>
#include <complex.h>
#include <math.h>
#include <strings.h>
#include "num/multind.h"
#include "num/flpmath.h"
#include "num/loop.h"
#include "misc/misc.h"
#include "misc/nested.h"
#include "filter.h"
// qsort comparator for floats: negative/zero/positive three-way compare.
// Fix: the previous version never returned 0 and reported -1 for BOTH
// cmp(a, b) and cmp(b, a) when the values were equal, violating the
// consistent-total-order requirement of qsort (undefined behavior).
static int cmp_float(const void* a, const void* b)
{
	float fa = *(const float*)a;
	float fb = *(const float*)b;

	return (fa > fb) - (fa < fb);
}
// qsort comparator for complex floats, ordered by magnitude.
// Fix: like cmp_float, the old version returned -1 for both cmp(a, b) and
// cmp(b, a) on equal magnitudes (the old comment "gives sign for 0." noted
// this), which violates qsort's consistent-ordering requirement.  Return a
// proper three-way result instead; equal magnitudes now compare equal.
static int cmp_complex_float(const void* a, const void* b)
{
	float ma = cabsf(*(const complex float*)a);
	float mb = cabsf(*(const complex float*)b);

	return (ma > mb) - (ma < mb);
}
// Sort N floats in ascending order, in place.
static void sort_floats(int N, float ar[N])
{
	qsort((void*)ar, N, sizeof(ar[0]), cmp_float);
}
// Sort N complex floats by magnitude in ascending order, in place.
static void sort_complex_floats(int N, complex float ar[N])
{
	qsort((void*)ar, N, sizeof(ar[0]), cmp_complex_float);
}
// Return the median of ar[0..N-1]; for even N, the mean of the two central
// samples.  The input array is not modified (a stack copy is sorted).
float median_float(int N, const float ar[N])
{
	float tmp[N];

	for (int i = 0; i < N; i++)
		tmp[i] = ar[i];

	sort_floats(N, tmp);

	int mid = (N - 1) / 2;

	if (1 == N % 2)
		return tmp[mid];

	return (tmp[mid] + tmp[mid + 1]) / 2.;
}
// Return the magnitude-median of ar[0..N-1]; for even N, the mean of the
// two central samples.  The input array is not modified.
complex float median_complex_float(int N, const complex float ar[N])
{
	complex float tmp[N];

	for (int i = 0; i < N; i++)
		tmp[i] = ar[i];

	sort_complex_floats(N, tmp);

	int mid = (N - 1) / 2;

	if (1 == N % 2)
		return tmp[mid];

	return (tmp[mid] + tmp[mid + 1]) / 2.;
}
// Compute the median along dimension M of a D-dimensional complex array,
// with explicit input/output strides (in bytes, as used by the void-pointer
// arithmetic below).  The output has dimension M collapsed to size 1.
void md_medianz2(int D, int M, const long dim[D], const long ostr[D], complex float* optr, const long istr[D], const complex float* iptr)
{
	assert(M < D);

	const long* nstr[2] = { ostr, istr };
	void* nptr[2] = { optr, (void*)iptr };

	long length = dim[M];
	long stride = istr[M];

	// iterate over all positions with dimension M collapsed to a singleton
	long dim2[D];
	for (int i = 0; i < D; i++)
		dim2[i] = dim[i];

	dim2[M] = 1;

	// gather the 'length' samples along M and reduce them to their median
	NESTED(void, nary_medianz, (void* ptr[]))
	{
		complex float tmp[length];

		for (long i = 0; i < length; i++)
			tmp[i] = *((complex float*)(ptr[1] + i * stride));

		*(complex float*)ptr[0] = median_complex_float(length, tmp);
	};

	md_nary(2, D, dim2, nstr, nptr, nary_medianz);
}
// Compute the median along dimension M of a contiguous D-dimensional
// complex array (strides derived from the dimensions).
void md_medianz(int D, int M, const long dim[D], complex float* optr, const complex float* iptr)
{
	assert(M < D);

	long dim2[D];
	for (int i = 0; i < D; i++)
		dim2[i] = dim[i];

	dim2[M] = 1;

	long istr[D];
	long ostr[D];

	// use CFL_SIZE instead of the magic constant 8, consistent with every
	// other stride computation in this file
	md_calc_strides(D, istr, dim, CFL_SIZE);
	md_calc_strides(D, ostr, dim2, CFL_SIZE);

	md_medianz2(D, M, dim, ostr, optr, istr, iptr);
}
// out[i] = sum_n grad[n] * (i_n - dims[n] / 2): a linear gradient centered
// in the array, built as md_zgradient plus a broadcast constant offset.
void centered_gradient(unsigned int N, const long dims[N], const complex float grad[N], complex float* out)
{
	// out = sum_n grad[n] * i_n
	md_zgradient(N, dims, out, grad);

	// constant offset cn = -sum_n grad[n] * dims[n] / 2
	long dims0[N];
	md_singleton_dims(N, dims0);

	long strs0[N];
	md_calc_strides(N, strs0, dims0, CFL_SIZE);

	complex float cn = 0.;

	for (unsigned int n = 0; n < N; n++)
		cn -= grad[n] * (float)dims[n] / 2.;

	// broadcast-add the offset to every element of out
	long strs[N];
	md_calc_strides(N, strs, dims, CFL_SIZE);

	md_zadd2(N, dims, strs, out, strs, out, strs0, &cn);
}
// out = exp(1i * g), where g is the centered linear gradient with slope
// 2*pi*pos[n]/dims[n] in dimension n (a linear phase ramp).
void linear_phase(unsigned int N, const long dims[N], const float pos[N], complex float* out)
{
	complex float grad[N];

	for (unsigned int n = 0; n < N; n++)
		grad[n] = 2. * M_PI * (float)(pos[n]) / ((float)dims[n]);

	centered_gradient(N, dims, grad, out);

	// element-wise complex exponential of the (purely real) ramp
	md_zexpj(N, dims, out, out);
}
// out = sum over flagged dimensions n of (centered ramp with slope sc[n])^2
// -- a scaled k-space Laplacian.
void klaplace_scaled(int N, const long dims[N], long flags, const float sc[N], complex float* out)
{
	long flags2 = flags;

	complex float* tmp = md_alloc(N, dims, CFL_SIZE);

	md_clear(N, dims, out, CFL_SIZE);

	// loop over the set bits of 'flags', lowest first
	for (int i = 0; i < bitcount(flags); i++) {

		// NOTE(review): ffs() takes an int, so set bits above bit 31 of
		// the 'long' flags would be missed on LP64 -- confirm flags fit
		unsigned int lsb = ffs(flags2) - 1;
		flags2 = MD_CLEAR(flags2, lsb);

		// squared centered ramp with slope sc[lsb] along dimension lsb
		complex float grad[N];

		for (int j = 0; j < N; j++)
			grad[j] = 0.;

		grad[lsb] = sc[lsb];

		centered_gradient(N, dims, grad, tmp);
		md_zspow(N, dims, tmp, tmp, 2.);
		md_zadd(N, dims, out, out, tmp);
	}

	md_free(tmp);
}
// k-space Laplacian over the flagged dimensions, with each dimension
// normalized by its length (slope 1/dims[n]).
void klaplace(int N, const long dims[N], long flags, complex float* out)
{
	float scale[N];

	for (int i = 0; i < N; i++)
		scale[i] = 1. / (float)dims[i];

	klaplace_scaled(N, dims, flags, scale, out);
}
// Fill ptr[0..N-1] with the generalized cosine window
// w[i] = alpha - beta * cos(2*pi*i / (N - 1)); a single sample is 1.
static void nary_zwindow(const long N, const float alpha, const float beta, complex float* ptr)
{
	if (1 == N) {

		ptr[0] = 1.;
		return;
	}

	#pragma omp parallel for
	for (long k = 0; k < N; k++)
		ptr[k] = alpha - beta * cosf(2. * M_PI * k / (N - 1));
}
// Fill ptr[0..N-1] with a Hamming window.
// Fix: 'return <void expression>;' is a C constraint violation
// (C11 6.8.6.4p1); call the helper and fall off the end instead.
static void nary_zhamming(const long N, complex float* ptr)
{
#if 0
	// "exact" Hamming coefficients
	const float alpha = 0.53836;
	const float beta = 0.46164;
#else
	// conventional rounded coefficients
	const float alpha = 0.54;
	const float beta = 0.46;
#endif

	nary_zwindow(N, alpha, beta, ptr);
}
// Fill ptr[0..N-1] with a Hann window (alpha = beta = 0.5).
// Fix: 'return <void expression>;' is a C constraint violation
// (C11 6.8.6.4p1); call the helper and fall off the end instead.
static void nary_zhann(const long N, complex float* ptr)
{
	const float alpha = 0.5;
	const float beta = 0.5;

	nary_zwindow(N, alpha, beta, ptr);
}
enum window_type { WINDOW_HAMMING, WINDOW_HANN };
// Multiply iptr by a separable window along every dimension selected by
// 'flags', writing to optr (strided).  Windows one dimension per call and
// recurses, in place on optr, for the remaining flagged dimensions.
static void md_zwindow2(unsigned int D, const long dims[D], unsigned int flags, const long ostrs[D], complex float* optr, const long istrs[D], const complex float* iptr, enum window_type wt)
{
	if (0 == flags) {
		// nothing to window: plain strided copy
		md_copy2(D, dims, ostrs, optr, istrs, iptr, CFL_SIZE);
		return;
	}

	// process first flagged dimension
	unsigned int lsb = ffs(flags) - 1;

	long win_dims[D];
	long win_strs[D];

	md_select_dims(D, MD_BIT(lsb), win_dims, dims);
	md_calc_strides(D, win_strs, win_dims, CFL_SIZE);

	// 1D window of length dims[lsb], broadcast-multiplied along lsb
	// NOTE(review): the window is filled by a plain CPU loop, while
	// md_alloc_sameplace may allocate wherever iptr lives -- confirm this
	// is only reached with host memory
	complex float* win = md_alloc_sameplace(D, win_dims, CFL_SIZE, iptr);

	switch (wt) {
	case WINDOW_HAMMING: nary_zhamming(dims[lsb], win); break;
	case WINDOW_HANN: nary_zhann(dims[lsb], win); break;
	};

	md_zmul2(D, dims, ostrs, optr, istrs, iptr, win_strs, win);
	md_free(win);

	flags = MD_CLEAR(flags, lsb);

	// process other dimensions, now reading from optr (in place)
	if (0 != flags)
		md_zwindow2(D, dims, flags, ostrs, optr, ostrs, optr, wt);

	return;
}
#if 0
// Disabled stale wrapper.  NOTE(review): it passes the bool 'hamming'
// where md_zwindow2 now expects an enum window_type -- update the last
// argument before re-enabling.
static void md_zwindow(const unsigned int D, const long dims[D], const long flags, complex float* optr, const complex float* iptr, bool hamming)
{
	long strs[D];
	md_calc_strides(D, strs, dims, CFL_SIZE);

	md_zwindow2(D, dims, flags, strs, optr, strs, iptr, hamming);
}
#endif
/*
* Apply Hamming window to iptr along flags
*/
// Apply a Hamming window to iptr along the flagged dimensions, using
// contiguous strides.  (Dropped the former 'return <void expr>;' -- a C
// constraint violation, C11 6.8.6.4p1.)
void md_zhamming(const unsigned int D, const long dims[D], const long flags, complex float* optr, const complex float* iptr)
{
	long strs[D];
	md_calc_strides(D, strs, dims, CFL_SIZE);

	md_zhamming2(D, dims, flags, strs, optr, strs, iptr);
}
/*
* Apply Hamming window to iptr along flags (with strides)
*/
// Apply a Hamming window to iptr along the flagged dimensions, with
// explicit strides.  (Dropped the former 'return <void expr>;' -- a C
// constraint violation, C11 6.8.6.4p1.)
void md_zhamming2(const unsigned int D, const long dims[D], const long flags, const long ostrs[D], complex float* optr, const long istrs[D], const complex float* iptr)
{
	md_zwindow2(D, dims, flags, ostrs, optr, istrs, iptr, WINDOW_HAMMING);
}
/*
* Apply Hann window to iptr along flags
*/
// Apply a Hann window to iptr along the flagged dimensions, using
// contiguous strides.  (Dropped the former 'return <void expr>;' -- a C
// constraint violation, C11 6.8.6.4p1.)
void md_zhann(const unsigned int D, const long dims[D], const long flags, complex float* optr, const complex float* iptr)
{
	long strs[D];
	md_calc_strides(D, strs, dims, CFL_SIZE);

	md_zhann2(D, dims, flags, strs, optr, strs, iptr);
}
/*
* Apply Hann window to iptr along flags (with strides)
*/
// Apply a Hann window to iptr along the flagged dimensions, with explicit
// strides.  (Dropped the former 'return <void expr>;' -- a C constraint
// violation, C11 6.8.6.4p1.)
void md_zhann2(const unsigned int D, const long dims[D], const long flags, const long ostrs[D], complex float* optr, const long istrs[D], const complex float* iptr)
{
	md_zwindow2(D, dims, flags, ostrs, optr, istrs, iptr, WINDOW_HANN);
}
|
dependences.c | // RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s
// REQUIRES: ompt
#include "callback.h"
#include <omp.h>
#include <math.h>
#include <unistd.h>
// OMPT regression test: two sibling tasks with an out->in dependence on x.
// The FileCheck directives below are the test oracle -- they are comments
// to the compiler but functional to the test runner; do not edit them.
int main()
{
  int x = 0;
  #pragma omp parallel num_threads(2)
  {
    #pragma omp single
    {
      // producer task: writes x (depend(out))
      #pragma omp task depend(out:x)
      {
        x++;
        // keep the producer alive so the dependence is actually observed
        sleep(1);
      }
      // consumer task: must wait for the producer
      // NOTE(review): it writes x despite an 'in' dependence; for this
      // OMPT test only the dependence pair matters -- confirm intended
      #pragma omp task depend(in:x)
      {
        x = -1;
      }
    }
  }
  printf("x = %d\n", x);
  // Check if libomp supports the callbacks for this test.
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_create'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_dependences'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_dependence'
  // CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]]
  // CHECK: {{^}}{{[0-9]+}}: ompt_event_task_create: parent_task_id={{[0-9]+}}, parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter=[[NULL]], new_task_id=[[FIRST_TASK:[0-f]+]], parallel_function={{0x[0-f]+}}, task_type=ompt_task_explicit=3, has_dependences=yes
  // CHECK: {{^}}{{[0-9]+}}: ompt_event_task_dependences: task_id=[[FIRST_TASK]], deps={{0x[0-f]+}}, ndeps=1
  // CHECK: {{^}}{{[0-9]+}}: ompt_event_task_create: parent_task_id={{[0-9]+}}, parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter=[[NULL]], new_task_id=[[SECOND_TASK:[0-f]+]], parallel_function={{0x[0-f]+}}, task_type=ompt_task_explicit=3, has_dependences=yes
  // CHECK: {{^}}{{[0-9]+}}: ompt_event_task_dependences: task_id=[[SECOND_TASK]], deps={{0x[0-f]+}}, ndeps=1
  // CHECK: {{^}}{{[0-9]+}}: ompt_event_task_dependence_pair: first_task_id=[[FIRST_TASK]], second_task_id=[[SECOND_TASK]]
  return 0;
}
|
IcgGeneralizedNablaT.c | // Copyright (C) 2016 Gernot Riegler
// Institute for Computer Graphics and Vision (ICG)
// Graz University of Technology (TU GRAZ)
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// 3. All advertising materials mentioning features or use of this software
// must display the following acknowledgement:
// This product includes software developed by the ICG, TU GRAZ.
// 4. Neither the name of the ICG, TU GRAZ nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
// THIS SOFTWARE IS PROVIDED ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE PROVIDER BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/IcgGeneralizedNablaT.c"
#else
// Forward pass of the generalized transposed nabla (adjoint of the
// generalized forward-difference operator): each group of n_dir input
// channels is reduced to one output channel by accumulating differences
// along the per-channel direction vectors.  Returns 1; the result is
// stored in the module's "output" field.
// Fixes: removed the unused local 'offset'; the argcheck message now
// matches the actual condition (it claimed "% 2" regardless of n_dir);
// size variables are zero-initialized to avoid maybe-uninitialized use.
static int icgnn_(IcgGeneralizedNablaT_updateOutput)(lua_State *L)
{
  THTensor* in = luaT_checkudata(L, 2, torch_Tensor);
  int neg = luaT_getfieldcheckboolean(L, 1, "neg");
  THTensor* dir = luaT_getfieldcheckudata(L, 1, "directions", torch_Tensor);
  THTensor* out = luaT_getfieldcheckudata(L, 1, "output", torch_Tensor);

  long n_dim = in->nDimension;
  luaL_argcheck(L, n_dim == 3 || n_dim == 4, 2, "3D or 4D(batch mode) tensor expected");

  in = THTensor_(newContiguous)(in);
  real* in_data = THTensor_(data)(in);
  real* dir_data = THTensor_(data)(dir);

  // 'directions' is indexed as dir_data[dir_idx] (x) and
  // dir_data[n_dir + dir_idx] (y), i.e. a 2 x n_dir layout
  long n_dir = dir->size[1];

  long num = 0, channels = 0, height = 0, width = 0, out_channels = 0;
  if(n_dim == 3) {
    num = 1;
    channels = in->size[0];
    height = in->size[1];
    width = in->size[2];
    out_channels = channels / n_dir;
    THTensor_(resize3d)(out, out_channels, height, width);
  }
  else if(n_dim == 4) {
    num = in->size[0];
    channels = in->size[1];
    height = in->size[2];
    width = in->size[3];
    out_channels = channels / n_dir;
    THTensor_(resize4d)(out, num, out_channels, height, width);
  }

  luaL_argcheck(L, channels % n_dir == 0, 2, "input channels must be divisible by the number of directions");

  real* out_data = THTensor_(data)(out);

  long n;
#pragma omp parallel for private(n)
  for(n = 0; n < num; ++n) {
    long c;
    for(c = 0; c < channels; c += n_dir) {
      long h;
      for(h = 0; h < height; ++h) {
        long w;
        for(w = 0; w < width; ++w) {
          long out_idx = ((n * out_channels + c/n_dir) * height + h) * width + w;
          out_data[out_idx] = 0;

          int dir_idx = 0;
          for(dir_idx = 0; dir_idx < n_dir; ++dir_idx) {
            // minus (-) dir data, because it has to be inv to GeneralizedNabla
            long dir_x = -dir_data[dir_idx];
            long dir_y = -dir_data[n_dir + dir_idx];
            long in_idx = ((n * channels + c+dir_idx) * height + h) * width + w;

            // add the shifted neighbour if it lies inside the image
            if(w+dir_x >= 0 && h+dir_y >= 0 && w+dir_x < width && h+dir_y < height) {
              out_data[out_idx] += in_data[in_idx + dir_y * width + dir_x];
            }
            // subtract the centre sample where the mirrored shift is valid
            if(w-dir_x >= 0 && h-dir_y >= 0 && w-dir_x < width && h-dir_y < height) {
              out_data[out_idx] -= in_data[in_idx];
            }
          }

          if(neg) {
            out_data[out_idx] = -out_data[out_idx];
          }
        }
      }
    }
  }

  THTensor_(free)(in);

  return 1;
}
/* Backward pass of IcgGeneralizedNablaT: scatters gradOutput back onto
 * gradInput with the signs of the forward pass (times -1 when `neg`).
 *
 * Lua stack: arg 1 = module table (neg, directions, output, gradInput),
 *            arg 2 = input tensor (for the shape of gradInput),
 *            arg 3 = gradOutput tensor.
 * Returns 1; the result is left in the module's `gradInput` field.
 *
 * NOTE(review): unlike updateOutput, `in` and `grad_out` are not made
 * contiguous here — presumably callers always pass contiguous tensors;
 * confirm upstream. */
static int icgnn_(IcgGeneralizedNablaT_updateGradInput)(lua_State *L)
{
  THTensor *in = luaT_checkudata(L, 2, torch_Tensor);
  THTensor *grad_out = luaT_checkudata(L, 3, torch_Tensor);
  int neg = luaT_getfieldcheckboolean(L, 1, "neg");
  THTensor* dir = luaT_getfieldcheckudata(L, 1, "directions", torch_Tensor);
  /* fetched only to verify the module has been run forward (field check);
   * its data is not needed for the gradient */
  THTensor *out = luaT_getfieldcheckudata(L, 1, "output", torch_Tensor);
  (void)out;
  THTensor *grad_in = luaT_getfieldcheckudata(L, 1, "gradInput", torch_Tensor);

  THTensor_(resizeAs)(grad_in, in);

  real* grad_in_data = THTensor_(data)(grad_in);
  real* grad_out_data = THTensor_(data)(grad_out);
  real* dir_data = THTensor_(data)(dir);

  long n_dir = dir->size[1];

  long n_dim = in->nDimension;
  /* added: without this check the sizes below stayed uninitialized for
   * tensors that are neither 3D nor 4D (consistent with updateOutput) */
  luaL_argcheck(L, n_dim == 3 || n_dim == 4, 2, "3D or 4D(batch mode) tensor expected");

  long num, channels, height, width, out_channels;
  if(n_dim == 3) {
    num = 1;
    channels = in->size[0];
    height = in->size[1];
    width = in->size[2];
    out_channels = channels / n_dir;
  }
  else { /* n_dim == 4 */
    num = in->size[0];
    channels = in->size[1];
    height = in->size[2];
    width = in->size[3];
    out_channels = channels / n_dir;
  }

  THTensor_(zero)(grad_in);
  neg = neg ? -1 : 1; /* reuse the flag as the sign factor */

  long n;
#pragma omp parallel for private(n)
  for(n = 0; n < num; ++n) {
    long c;
    for(c = 0; c < channels; c += n_dir) {
      long h;
      for(h = 0; h < height; ++h) {
        long w;
        for(w = 0; w < width; ++w) {
          long out_idx = ((n * out_channels + c/n_dir) * height + h) * width + w;

          int dir_idx = 0;
          for(dir_idx = 0; dir_idx < n_dir; ++dir_idx) {
            // minus (-) dir data, because it has to be inv to GeneralizedNabla
            long dir_x = -dir_data[dir_idx];
            long dir_y = -dir_data[n_dir + dir_idx];
            long in_idx = ((n * channels + c+dir_idx) * height + h) * width + w;

            if(w+dir_x >= 0 && h+dir_y >= 0 && w+dir_x < width && h+dir_y < height) {
              grad_in_data[in_idx + dir_y * width + dir_x] += neg * grad_out_data[out_idx];
            }
            if(w-dir_x >= 0 && h-dir_y >= 0 && w-dir_x < width && h-dir_y < height) {
              grad_in_data[in_idx] += -neg * grad_out_data[out_idx];
            }
          }
        }
      }
    }
  }

  return 1;
}
/* Method table exported to Lua; installed under the "icgnn" name on the
 * torch.Tensor metatable by IcgGeneralizedNablaT_init below.  The list is
 * NULL-terminated as required by luaT_registeratname. */
static const struct luaL_Reg icgnn_(IcgGeneralizedNablaT__) [] = {
  {"IcgGeneralizedNablaT_updateOutput", icgnn_(IcgGeneralizedNablaT_updateOutput)},
  {"IcgGeneralizedNablaT_updateGradInput", icgnn_(IcgGeneralizedNablaT_updateGradInput)},
  {NULL, NULL}
};
/* Registers the module's methods on the torch.Tensor metatable for the
 * current real type, then restores the Lua stack. */
static void icgnn_(IcgGeneralizedNablaT_init)(lua_State *L)
{
  luaT_pushmetatable(L, torch_Tensor);
  luaT_registeratname(L, icgnn_(IcgGeneralizedNablaT__), "icgnn");
  lua_pop(L,1); /* pop the metatable pushed above */
}
#endif
|
grid_astar.h | /*
* Copyright (c) 2014-2017, the neonavigation authors
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef PLANNER_CSPACE_GRID_ASTAR_H
#define PLANNER_CSPACE_GRID_ASTAR_H
#include <memory>
#define _USE_MATH_DEFINES
#include <cmath>
#include <cfloat>
#include <list>
#include <map>
#include <unordered_map>
#include <vector>
#include <boost/chrono.hpp>
#include <planner_cspace/reservable_priority_queue.h>
#include <planner_cspace/cyclic_vec.h>
#include <planner_cspace/blockmem_gridmap.h>
#include <omp.h>
template <int DIM = 3, int NONCYCLIC = 2>
class GridAstar
{
public:
using Vec = CyclicVecInt<DIM, NONCYCLIC>;
using Vecf = CyclicVecFloat<DIM, NONCYCLIC>;
template <class T, int block_width = 0x20>
class Gridmap : public BlockMemGridmap<T, DIM, NONCYCLIC, block_width>
{
using BlockMemGridmap<T, DIM, NONCYCLIC, block_width>::BlockMemGridmap;
};
class PriorityVec
{
public:
float p_;
float p_raw_;
Vec v_;
PriorityVec()
{
p_ = 0;
}
PriorityVec(const float& p, const float& p_raw, const Vec& v)
{
p_ = p;
p_raw_ = p_raw;
v_ = v;
}
bool operator<(const PriorityVec& b) const
{
// smaller first
return p_ > b.p_;
}
};
class GridmapUpdate
{
private:
const Vec p0_;
const Vec p1_;
const float cost_estim_;
const float cost_;
public:
GridmapUpdate(
const Vec& p0, const Vec& p1,
const float cost_estim, const float cost)
: p0_(p0)
, p1_(p1)
, cost_estim_(cost_estim)
, cost_(cost)
{
}
const Vec& getParentPos() const
{
return p0_;
}
const Vec& getPos() const
{
return p1_;
}
const float getCost() const
{
return cost_;
}
const PriorityVec getPriorityVec() const
{
return PriorityVec(cost_estim_, cost_, p1_);
}
};
public:
constexpr int getDim() const
{
return DIM;
}
constexpr int getNoncyclic() const
{
return NONCYCLIC;
}
void setSearchTaskNum(const size_t& search_task_num)
{
search_task_num_ = search_task_num;
}
void reset(const Vec size)
{
g_.reset(size);
g_.clear(FLT_MAX);
parents_.reserve(g_.ser_size() / 16);
open_.reserve(g_.ser_size() / 16);
}
GridAstar()
: queue_size_limit_(0)
, search_task_num_(1)
{
}
explicit GridAstar(const Vec size)
{
reset(size);
queue_size_limit_ = 0;
}
void setQueueSizeLimit(const size_t size)
{
queue_size_limit_ = size;
}
bool search(
const Vec& s, const Vec& e,
std::list<Vec>& path,
std::function<float(const Vec&, Vec&, const Vec&, const Vec&)> cb_cost,
std::function<float(const Vec&, const Vec&)> cb_cost_estim,
std::function<std::vector<Vec>&(const Vec&, const Vec&, const Vec&)> cb_search,
std::function<bool(const std::list<Vec>&)> cb_progress,
const float cost_leave,
const float progress_interval,
const bool return_best = false)
{
return searchImpl(g_, s, e, path,
cb_cost, cb_cost_estim, cb_search, cb_progress,
cost_leave, progress_interval, return_best);
}
protected:
bool searchImpl(
Gridmap<float>& g,
const Vec& st, const Vec& en,
std::list<Vec>& path,
std::function<float(const Vec&, Vec&, const Vec&, const Vec&)> cb_cost,
std::function<float(const Vec&, const Vec&)> cb_cost_estim,
std::function<std::vector<Vec>&(const Vec&, const Vec&, const Vec&)> cb_search,
std::function<bool(const std::list<Vec>&)> cb_progress,
const float cost_leave,
const float progress_interval,
const bool return_best = false)
{
if (st == en)
{
return false;
}
Vec s = st;
Vec e = en;
for (int i = NONCYCLIC; i < DIM; i++)
{
s.cycleUnsigned(g.size());
e.cycleUnsigned(g.size());
}
g.clear(FLT_MAX);
open_.clear();
parents_.clear();
g[s] = 0;
open_.push(PriorityVec(cb_cost_estim(s, e), 0, s));
auto ts = boost::chrono::high_resolution_clock::now();
Vec better = s;
int cost_estim_min = cb_cost_estim(s, e);
while (true)
{
// Fetch tasks to be paralellized
if (open_.size() < 1)
{
// No fesible path
if (return_best)
{
findPath(s, better, path);
}
return false;
}
bool found(false);
std::vector<PriorityVec> centers;
for (size_t i = 0; i < search_task_num_; ++i)
{
if (open_.size() == 0)
break;
PriorityVec center = open_.top();
open_.pop();
if (center.v_ == e || center.p_ - center.p_raw_ <= cost_leave)
{
e = center.v_;
found = true;
break;
}
centers.push_back(center);
}
if (found)
break;
auto tnow = boost::chrono::high_resolution_clock::now();
if (boost::chrono::duration<float>(tnow - ts).count() >= progress_interval)
{
std::list<Vec> path_tmp;
ts = tnow;
findPath(s, better, path_tmp);
cb_progress(path_tmp);
}
#pragma omp parallel
{
std::list<GridmapUpdate> updates;
std::list<Vec> dont;
#pragma omp for schedule(static)
for (auto it = centers.cbegin(); it < centers.cend(); ++it)
{
const Vec p = it->v_;
const float c = it->p_raw_;
const float c_estim = it->p_;
if (c > g[p])
continue;
if (c_estim - c < cost_estim_min)
{
cost_estim_min = c_estim - c;
better = p;
}
const std::vector<Vec> search_list = cb_search(p, s, e);
bool updated(false);
for (auto it = search_list.cbegin(); it < search_list.cend(); ++it)
{
while (1)
{
Vec next = p + *it;
next.cycleUnsigned(g.size());
if (next.isExceeded(g.size()))
break;
if (g[next] < 0)
break;
const float cost_estim = cb_cost_estim(next, e);
if (cost_estim < 0 || cost_estim == FLT_MAX)
break;
const float cost = cb_cost(p, next, s, e);
if (cost < 0 || cost == FLT_MAX)
break;
const float cost_next = c + cost;
if (g[next] > cost_next)
{
updated = true;
updates.push_back(
GridmapUpdate(p, next, cost_next + cost_estim, cost_next));
}
break;
}
}
if (!updated)
dont.push_back(p);
}
#pragma omp critical
{
for (const GridmapUpdate& u : updates)
{
if (g[u.getPos()] > u.getCost())
{
g[u.getPos()] = u.getCost();
parents_[u.getPos()] = u.getParentPos();
open_.push(u.getPriorityVec());
if (queue_size_limit_ > 0 && open_.size() > queue_size_limit_)
open_.pop_back();
}
}
for (const Vec& p : dont)
{
g[p] = -1;
}
} // omp critical
} // omp parallel
}
return findPath(s, e, path);
}
bool findPath(const Vec& s, const Vec& e, std::list<Vec>& path)
{
Vec n = e;
while (true)
{
path.push_front(n);
if (n == s)
break;
if (parents_.find(n) == parents_.end())
return false;
const Vec child = n;
n = parents_[child];
parents_.erase(child);
}
return true;
}
Gridmap<float> g_;
std::unordered_map<Vec, Vec, Vec> parents_;
reservable_priority_queue<PriorityVec> open_;
size_t queue_size_limit_;
size_t search_task_num_;
};
#endif // PLANNER_CSPACE_GRID_ASTAR_H
|
stream.c | /*-----------------------------------------------------------------------*/
/* Program: Stream */
/* Revision: $Id$ */
/* Original code developed by John D. McCalpin */
/* Programmers: John D. McCalpin */
/* Joe R. Zagar */
/* */
/* This program measures memory transfer rates in GB/s for simple */
/* computational kernels coded in C. */
/*-----------------------------------------------------------------------*/
/* Copyright 1991-2003: John D. McCalpin */
/*-----------------------------------------------------------------------*/
/* License: */
/* 1. You are free to use this program and/or to redistribute */
/* this program. */
/* 2. You are free to modify this program for your own use, */
/* including commercial use, subject to the publication */
/* restrictions in item 3. */
/* 3. You are free to publish results obtained from running this */
/* program, or from works that you derive from this program, */
/* with the following limitations: */
/* 3a. In order to be referred to as "STREAM benchmark results", */
/* published results must be in conformance to the STREAM */
/* Run Rules, (briefly reviewed below) published at */
/* http://www.cs.virginia.edu/stream/ref.html */
/* and incorporated herein by reference. */
/* As the copyright holder, John McCalpin retains the */
/* right to determine conformity with the Run Rules. */
/* 3b. Results based on modified source code or on runs not in */
/* accordance with the STREAM Run Rules must be clearly */
/* labelled whenever they are published. Examples of */
/* proper labelling include: */
/* "tuned STREAM benchmark results" */
/* "based on a variant of the STREAM benchmark code" */
/* Other comparable, clear and reasonable labelling is */
/* acceptable. */
/* 3c. Submission of results to the STREAM benchmark web site */
/* is encouraged, but not required. */
/* 4. Use of this program or creation of derived works based on this */
/* program constitutes acceptance of these licensing restrictions. */
/* 5. Absolutely no warranty is expressed or implied. */
/*-----------------------------------------------------------------------*/
#include <hpcc.h>
#include <float.h>
#include <limits.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#define TUNED 1
#define VERBOSE 1
/* INSTRUCTIONS:
*
* 1) Stream requires a good bit of memory to run. Adjust the
* value of 'N' (below) to give a 'timing calibration' of
* at least 20 clock-ticks. This will provide rate estimates
* that should be good to about 5% precision.
*/
static int VectorSize;
# define N 2000000
# define NTIMES 10
# define OFFSET 0
/*
* 3) Compile the code with full optimization. Many compilers
* generate unreasonably bad code before the optimizer tightens
* things up. If the results are unreasonably good, on the
* other hand, the optimizer might be too smart for me!
*
* Try compiling with:
* cc -O stream_omp.c -o stream_omp
*
* This is known to work on Cray, SGI, IBM, and Sun machines.
*
*
* 4) Mail the results to mccalpin@cs.virginia.edu
* Be sure to include:
* a) computer hardware model number and software revision
* b) the compiler flags
* c) all of the output from the test case.
* Thanks!
*
*/
# define HLINE "-------------------------------------------------------------\n"
static double *a, *b, *c;
static double avgtime[4] = {0}, maxtime[4] = {0},
mintime[4] = {FLT_MAX,FLT_MAX,FLT_MAX,FLT_MAX};
static char *label[4] = {"Copy: ", "Scale: ",
"Add: ", "Triad: "};
static double bytes[4] = {
2 * sizeof(double),
2 * sizeof(double),
3 * sizeof(double),
3 * sizeof(double)
};
#define mysecond MPI_Wtime
#ifdef TUNED
extern void tuned_STREAM_Copy(void);
extern void tuned_STREAM_Scale(double scalar);
extern void tuned_STREAM_Add(void);
extern void tuned_STREAM_Triad(double scalar);
#endif
/* Validate the benchmark arrays.
 * Analytically replays the initialization and the NTIMES kernel iterations
 * on scalars, scales the expected per-element values by VectorSize, and
 * compares against the actual array sums with a relative tolerance.
 * Sets *failure to 0 on success, 1 otherwise; reports to outFile when
 * doIO is nonzero (only the first failing array is reported). */
static void
checkSTREAMresults (FILE *outFile, int doIO, int *failure)
{
  double exp_a, exp_b, exp_c, scalar;
  double sum_a, sum_b, sum_c;
  double epsilon;
  int idx, iter;

  /* reproduce initialization */
  exp_a = 1.0;
  exp_b = 2.0;
  exp_c = 0.0;

  /* a[] is modified during timing check */
  exp_a = 2.0E0 * exp_a;

  /* now execute timing loop */
  scalar = 3.0;
  for (iter = 0; iter < NTIMES; iter++) {
    exp_c = exp_a;
    exp_b = scalar * exp_c;
    exp_c = exp_a + exp_b;
    exp_a = exp_b + scalar * exp_c;
  }

  /* each element carries the same value, so the expected sum is a scale */
  exp_a = exp_a * (double) VectorSize;
  exp_b = exp_b * (double) VectorSize;
  exp_c = exp_c * (double) VectorSize;

  sum_a = 0.0;
  sum_b = 0.0;
  sum_c = 0.0;
  for (idx = 0; idx < VectorSize; idx++) {
    sum_a += a[idx];
    sum_b += b[idx];
    sum_c += c[idx];
  }

#ifdef VERBOSE
  if (doIO) {
    fprintf( outFile, "Results Comparison: \n");
    fprintf( outFile, "        Expected  : %f %f %f \n", exp_a, exp_b, exp_c);
    fprintf( outFile, "        Observed  : %f %f %f \n", sum_a, sum_b, sum_c);
  }
#endif

  epsilon = 1.e-8;
  *failure = 1;

  if (fabs(exp_a - sum_a) / sum_a > epsilon) {
    if (doIO) {
      fprintf( outFile, "Failed Validation on array a[]\n");
      fprintf( outFile, "        Expected  : %f \n", exp_a);
      fprintf( outFile, "        Observed  : %f \n", sum_a);
    }
  }
  else if (fabs(exp_b - sum_b) / sum_b > epsilon) {
    if (doIO) {
      fprintf( outFile, "Failed Validation on array b[]\n");
      fprintf( outFile, "        Expected  : %f \n", exp_b);
      fprintf( outFile, "        Observed  : %f \n", sum_b);
    }
  }
  else if (fabs(exp_c - sum_c) / sum_c > epsilon) {
    if (doIO) {
      fprintf( outFile, "Failed Validation on array c[]\n");
      fprintf( outFile, "        Expected  : %f \n", exp_c);
      fprintf( outFile, "        Observed  : %f \n", sum_c);
    }
  }
  else {
    *failure = 0;
    if (doIO) fprintf( outFile, "Solution Validates\n");
  }
}
# define M 20
/* Estimate the timer granularity in microseconds: collect M strictly
 * increasing timestamps (spin until the clock visibly advances), then
 * return the smallest positive gap between consecutive samples. */
static int
checktick()
{
  int k, min_delta, delta;
  double t1, t2, samples[M];

  /* Collect a sequence of M unique time values from the system. */
  for (k = 0; k < M; k++) {
    t1 = mysecond();
    /* busy-wait until the clock has moved by at least 1 us */
    while (((t2 = mysecond()) - t1) < 1.0E-6)
      ;
    samples[k] = t1 = t2;
  }

  /*
   * The minimum difference between these M values is our estimate
   * (in microseconds) for the clock granularity.
   */
  min_delta = 1000000;
  for (k = 1; k < M; k++) {
    delta = (int)( 1.0E6 * (samples[k] - samples[k-1]) );
    min_delta = Mmin(min_delta, Mmax(delta, 0));
  }

  return (min_delta);
}
#undef M
/* Run the STREAM benchmark (Copy/Scale/Add/Triad kernels) on three
 * locally allocated vectors of doubles and report bandwidth in GB/s.
 *
 * params   - HPCC run parameters (output file name, vector sizing).
 * doIO     - nonzero: append a human-readable report to params->outFname.
 * copyGBs/scaleGBs/addGBs/triadGBs - out: best-observed rate per kernel.
 * failure  - out: 0 if the results validate, 1 otherwise.
 * Returns 0 on success, 1 on I/O or allocation failure.
 *
 * NOTE(review): avgtime/mintime/maxtime are file-scope statics that are
 * never reset here, so calling this function twice in one process would
 * accumulate stale timings — confirm HPCC only calls it once per rank. */
int
HPCC_Stream(HPCC_Params *params, int doIO, double *copyGBs, double *scaleGBs, double *addGBs,
    double *triadGBs, int *failure) {
  int quantum;
  int BytesPerWord;
  register int j, k;
  double scalar, t, times[4][NTIMES];
  FILE *outFile;
  double GiBs = 1073741824.0, curGBs;

  if (doIO) {
    outFile = fopen( params->outFname, "a" );
    if (! outFile) {
      outFile = stderr;
      fprintf( outFile, "Cannot open output file.\n" );
      return 1;
    }
  }

  /* Size the three vectors to the locally available memory. */
  VectorSize = HPCC_LocalVectorSize( params, 3, sizeof(double), 0 ); /* Need 3 vectors */

  params->StreamVectorSize = VectorSize;

  a = HPCC_XMALLOC( double, VectorSize );
  b = HPCC_XMALLOC( double, VectorSize );
  c = HPCC_XMALLOC( double, VectorSize );

  if (!a || !b || !c) {
    /* free whatever succeeded before bailing out */
    if (c) HPCC_free(c);
    if (b) HPCC_free(b);
    if (a) HPCC_free(a);
    if (doIO) {
      fprintf( outFile, "Failed to allocate memory (%d).\n", VectorSize );
      fflush( outFile );
      fclose( outFile );
    }
    return 1;
  }

  /* --- SETUP --- determine precision and check timing --- */

  if (doIO) {
    fprintf( outFile, HLINE);
    BytesPerWord = sizeof(double);
    fprintf( outFile, "This system uses %d bytes per DOUBLE PRECISION word.\n",
             BytesPerWord);

    fprintf( outFile, HLINE);
    fprintf( outFile, "Array size = %d, Offset = %d\n" , VectorSize, OFFSET);
    fprintf( outFile, "Total memory required = %.4f GiB.\n",
             (3.0 * BytesPerWord) * ( (double) VectorSize / GiBs));
    fprintf( outFile, "Each test is run %d times, but only\n", NTIMES);
    fprintf( outFile, "the *best* time for each is used.\n");
  }

#ifdef _OPENMP
  if (doIO) fprintf( outFile, HLINE);
  /* report the OpenMP thread count (one thread announces it) */
#pragma omp parallel private(k)
  {
#pragma omp single nowait
    {
      k = omp_get_num_threads();
      if (doIO) fprintf( outFile, "Number of Threads requested = %i\n",k);
      params->StreamThreads = k;
    }
  }
#endif

  /* Get initial value for system clock. */
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (j=0; j<VectorSize; j++) {
    a[j] = 1.0;
    b[j] = 2.0;
    c[j] = 0.0;
  }

  if (doIO) fprintf( outFile, HLINE);

  if  ( (quantum = checktick()) >= 1) {
    if (doIO) fprintf( outFile, "Your clock granularity/precision appears to be "
                       "%d microseconds.\n", quantum);
  } else {
    if (doIO) fprintf( outFile, "Your clock granularity appears to be "
                       "less than one microsecond.\n");
  }

  /* time one sweep over a[] to calibrate expected per-test duration */
  t = mysecond();
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (j = 0; j < VectorSize; j++)
    a[j] = 2.0E0 * a[j];
  t = 1.0E6 * (mysecond() - t);

  if (doIO) {
    fprintf( outFile, "Each test below will take on the order"
             " of %d microseconds.\n", (int) t  );
    /* NOTE(review): if checktick() returned 0 this divides by zero
       (double division, so inf, then an out-of-range int cast) */
    fprintf( outFile, "   (= %d clock ticks)\n", (int) (t/quantum) );
    fprintf( outFile, "Increase the size of the arrays if this shows that\n");
    fprintf( outFile, "you are not getting at least 20 clock ticks per test.\n");

    fprintf( outFile, HLINE);

    fprintf( outFile, "WARNING -- The above is only a rough guideline.\n");
    fprintf( outFile, "For best results, please be sure you know the\n");
    fprintf( outFile, "precision of your system timer.\n");
    fprintf( outFile, HLINE);
  }

  /* --- MAIN LOOP --- repeat test cases NTIMES times --- */

  scalar = 3.0;
  for (k=0; k<NTIMES; k++)
  {
    /* kernel 0: Copy  c = a */
    times[0][k] = mysecond();
#ifdef TUNED
    tuned_STREAM_Copy();
#else
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (j=0; j<VectorSize; j++)
      c[j] = a[j];
#endif
    times[0][k] = mysecond() - times[0][k];

    /* kernel 1: Scale  b = scalar*c */
    times[1][k] = mysecond();
#ifdef TUNED
    tuned_STREAM_Scale(scalar);
#else
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (j=0; j<VectorSize; j++)
      b[j] = scalar*c[j];
#endif
    times[1][k] = mysecond() - times[1][k];

    /* kernel 2: Add  c = a + b */
    times[2][k] = mysecond();
#ifdef TUNED
    tuned_STREAM_Add();
#else
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (j=0; j<VectorSize; j++)
      c[j] = a[j]+b[j];
#endif
    times[2][k] = mysecond() - times[2][k];

    /* kernel 3: Triad  a = b + scalar*c */
    times[3][k] = mysecond();
#ifdef TUNED
    tuned_STREAM_Triad(scalar);
#else
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (j=0; j<VectorSize; j++)
      a[j] = b[j]+scalar*c[j];
#endif
    times[3][k] = mysecond() - times[3][k];
  }

  /* --- SUMMARY --- */

  for (k=1; k<NTIMES; k++) /* note -- skip first iteration */
  {
    for (j=0; j<4; j++)
    {
      avgtime[j] = avgtime[j] + times[j][k];
      mintime[j] = Mmin(mintime[j], times[j][k]);
      maxtime[j] = Mmax(maxtime[j], times[j][k]);
    }
  }

  if (doIO)
    fprintf( outFile, "Function      Rate (GB/s)   Avg time     Min time     Max time\n");
  for (j=0; j<4; j++) {
    avgtime[j] /= (double)(NTIMES - 1); /* note -- skip first iteration */

    /* make sure no division by zero */
    curGBs = (mintime[j] > 0.0 ? 1.0 / mintime[j] : -1.0);
    curGBs *= 1e-9 * bytes[j] * VectorSize;
    if (doIO)
      fprintf( outFile, "%s%11.4f  %11.4f  %11.4f  %11.4f\n", label[j],
               curGBs,
               avgtime[j],
               mintime[j],
               maxtime[j]);
    switch (j) {
      case 0: *copyGBs = curGBs; break;
      case 1: *scaleGBs = curGBs; break;
      case 2: *addGBs = curGBs; break;
      case 3: *triadGBs = curGBs; break;
    }
  }
  if (doIO) fprintf( outFile, HLINE);

  /* --- Check Results --- */
  checkSTREAMresults( outFile, doIO, failure );
  if (doIO) fprintf( outFile, HLINE);

  HPCC_free(c);
  HPCC_free(b);
  HPCC_free(a);

  if (doIO) {
    fflush( outFile );
    fclose( outFile );
  }

  return 0;
}
/* Tuned STREAM "Copy" kernel: c[i] = a[i] over the whole vector. */
void tuned_STREAM_Copy()
{
  int idx;
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (idx = 0; idx < VectorSize; idx++)
    c[idx] = a[idx];
}
/* Tuned STREAM "Scale" kernel: b[i] = scalar * c[i]. */
void tuned_STREAM_Scale(double scalar)
{
  int idx;
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (idx = 0; idx < VectorSize; idx++)
    b[idx] = scalar * c[idx];
}
/* Tuned STREAM "Add" kernel: c[i] = a[i] + b[i]. */
void tuned_STREAM_Add()
{
  int idx;
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (idx = 0; idx < VectorSize; idx++)
    c[idx] = a[idx] + b[idx];
}
/* Tuned STREAM "Triad" kernel: a[i] = b[i] + scalar * c[i]. */
void tuned_STREAM_Triad(double scalar)
{
  int idx;
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (idx = 0; idx < VectorSize; idx++)
    a[idx] = b[idx] + scalar * c[idx];
}
|
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for `struct timeval` values.
 * Returns 1 if the difference is negative, otherwise 0.
 * Note: normalizes *y in place as part of the borrow handling
 * (same contract as the classic glibc manual example). */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  long carry;

  /* Borrow from seconds when x has fewer microseconds than y. */
  if (x->tv_usec < y->tv_usec) {
    carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * carry;
    y->tv_sec += carry;
  }
  /* Push excess microseconds up into seconds. */
  if (x->tv_usec - y->tv_usec > 1000000) {
    carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }

  /* After normalization tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  return x->tv_sec < y->tv_sec;
}
/* Order-1 3D 7-point stencil driver (PLUTO diamond-tiled variant).
 * Usage: prog Nx Ny Nz Nt  — interior problem size plus time steps;
 * one halo layer is added on each side of every spatial axis.
 * Runs the tiled sweep TESTS times and reports the best wall time. */
int main(int argc, char *argv[])
{
  int i, j, k, test;
  int Nx, Ny, Nz, Nt;

  /* Fixed: the original read Nx..Nt uninitialized when arguments were
   * missing (sizes only set if argc > 3, Nt only if argc > 4). */
  if (argc < 5) {
    fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
    return EXIT_FAILURE;
  }
  Nx = atoi(argv[1]) + 2; /* +2: halo layer on each side */
  Ny = atoi(argv[2]) + 2;
  Nz = atoi(argv[3]) + 2;
  Nt = atoi(argv[4]);

  /* Two time planes (ping-pong), each a Nz x Ny x Nx pointer lattice. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 24;
  tile_size[1] = 24;
  tile_size[2] = 4;
  tile_size[3] = 1024;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;
  const double alpha = 0.0876;
  const double beta = 0.0765;

  // initialize the t=0 plane with reproducible pseudo-random values
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // Diamond-tiled sweep generated by PLUTO/CLooG; tile sizes 24x24x4x1024.
    // (Kept verbatim: bounds are machine-generated and order-sensitive.)
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lbp, ubp;
    register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
  for (t1=-1;t1<=floord(Nt-2,12);t1++) {
    lbp=max(ceild(t1,2),ceild(24*t1-Nt+3,24));
    ubp=min(floord(Nt+Nz-4,24),floord(12*t1+Nz+9,24));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
    for (t2=lbp;t2<=ubp;t2++) {
      for (t3=max(max(0,ceild(24*t2-Nz,4)),3*t1);t3<=min(min(min(floord(Nt+Ny-4,4),floord(12*t1+Ny+21,4)),floord(24*t2+Ny+20,4)),floord(24*t1-24*t2+Nz+Ny+19,4));t3++) {
        for (t4=max(max(max(0,ceild(3*t1-255,256)),ceild(24*t2-Nz-1020,1024)),ceild(4*t3-Ny-1020,1024));t4<=min(min(min(min(floord(4*t3+Nx,1024),floord(Nt+Nx-4,1024)),floord(12*t1+Nx+21,1024)),floord(24*t2+Nx+20,1024)),floord(24*t1-24*t2+Nz+Nx+19,1024));t4++) {
          for (t5=max(max(max(max(max(0,12*t1),24*t1-24*t2+1),24*t2-Nz+2),4*t3-Ny+2),1024*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,12*t1+23),24*t2+22),4*t3+2),1024*t4+1022),24*t1-24*t2+Nz+21);t5++) {
            for (t6=max(max(24*t2,t5+1),-24*t1+24*t2+2*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+2*t5),t5+Nz-2);t6++) {
              for (t7=max(4*t3,t5+1);t7<=min(4*t3+3,t5+Ny-2);t7++) {
                lbv=max(1024*t4,t5+1);
                ubv=min(1024*t4+1023,t5+Nx-2);
#pragma ivdep
#pragma vector always
                for (t8=lbv;t8<=ubv;t8++) {
                  A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
                }
              }
            }
          }
        }
      }
    }
  }
}
/* End of CLooG code */

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays (Causing performance degradation
  /* for(i=0; i<Nz; i++){
       for(j=0;j<Ny;j++){
         free(A[0][i][j]);
         free(A[1][i][j]);
       }
       free(A[0][i]);
       free(A[1][i]);
     }
     free(A[0]);
     free(A[1]);
  */
  return 0;
}
|
lqsort_kernel.h | #pragma omp target teams num_teams(done_size) thread_limit(LQSORT_LOCAL_WORKGROUP_SIZE)
{
workstack_record workstack[QUICKSORT_BLOCK_SIZE/SORT_THRESHOLD];
int workstack_pointer;
T mys[QUICKSORT_BLOCK_SIZE], mysn[QUICKSORT_BLOCK_SIZE], temp[SORT_THRESHOLD];
T *s, *sn;
uint ltsum, gtsum;
uint lt[LQSORT_LOCAL_WORKGROUP_SIZE+1], gt[LQSORT_LOCAL_WORKGROUP_SIZE+1];
#pragma omp parallel
{
const uint blockid = omp_get_team_num();
const uint localid = omp_get_thread_num();
// workstack: stores the start and end of the sequences, direction of sort
// If the sequence is less that SORT_THRESHOLD, it gets sorted.
// It will only be pushed on the stack if it greater than the SORT_THRESHOLD.
// Note, that the sum of ltsum + gtsum is less than QUICKSORT_BLOCK_SIZE.
// The total sum of the length of records on the stack cannot exceed QUICKSORT_BLOCK_SIZE,
// but each individual record should be greater than SORT_THRESHOLD, so the maximum length
// of the stack is QUICKSORT_BLOCK_SIZE/SORT_THRESHOLD - in the case of BDW GT2 the length
// of the stack is 2 :)
uint i, tmp, ltp, gtp;
work_record<T> block = seqs[blockid];
const uint d_offset = block.start;
uint start = 0;
uint end = block.end - d_offset;
uint direction = 1; // which direction to sort
// initialize workstack and workstack_pointer: push the initial sequence on the stack
if (localid == 0) {
workstack_pointer = 0; // beginning of the stack
workstack_record wr = { start, end, direction };
workstack[0] = wr;
}
// copy block of data to be sorted by one workgroup into __shared__ memory
// note that indeces of __shared__ data go from 0 to end-start-1
if (block.direction == 1) {
for (i = localid; i < end; i += LQSORT_LOCAL_WORKGROUP_SIZE) {
mys[i] = d[i+d_offset];
}
} else {
for (i = localid; i < end; i += LQSORT_LOCAL_WORKGROUP_SIZE) {
mys[i] = dn[i+d_offset];
}
}
#pragma omp barrier
while (workstack_pointer >= 0) {
// pop up the stack
workstack_record wr = workstack[workstack_pointer];
start = wr.start;
end = wr.end;
direction = wr.direction;
#pragma omp barrier
if (localid == 0) {
--workstack_pointer;
ltsum = gtsum = 0;
}
if (direction == 1) {
s = mys;
sn = mysn;
} else {
s = mysn;
sn = mys;
}
// Set thread __shared__ counters to zero
lt[localid] = gt[localid] = 0;
ltp = gtp = 0;
#pragma omp barrier
// Pick a pivot
uint pivot = s[start];
if (start < end) {
pivot = median(pivot, s[(start+end) >> 1], s[end-1]);
}
// Align work item accesses for coalesced reads.
// Go through data...
for(i = start + localid; i < end; i += LQSORT_LOCAL_WORKGROUP_SIZE) {
tmp = s[i];
// counting elements that are smaller ...
if (tmp < pivot)
ltp++;
// or larger compared to the pivot.
if (tmp > pivot)
gtp++;
}
lt[localid] = ltp;
gt[localid] = gtp;
#pragma omp barrier
// calculate cumulative sums
uint n;
for(i = 1; i < LQSORT_LOCAL_WORKGROUP_SIZE; i <<= 1) {
n = 2*i - 1;
if ((localid & n) == n) {
lt[localid] += lt[localid-i];
gt[localid] += gt[localid-i];
}
#pragma omp barrier
}
if ((localid & n) == n) {
lt[LQSORT_LOCAL_WORKGROUP_SIZE] = ltsum = lt[localid];
gt[LQSORT_LOCAL_WORKGROUP_SIZE] = gtsum = gt[localid];
lt[localid] = 0;
gt[localid] = 0;
}
for(i = LQSORT_LOCAL_WORKGROUP_SIZE/2; i >= 1; i >>= 1) {
n = 2*i - 1;
if ((localid & n) == n) {
plus_prescan(&lt[localid - i], &lt[localid]);
plus_prescan(&gt[localid - i], &gt[localid]);
}
#pragma omp barrier
}
// Allocate locations for work items
uint lfrom = start + lt[localid];
uint gfrom = end - gt[localid+1];
// go thru data again writing elements to their correct position
for (i = start + localid; i < end; i += LQSORT_LOCAL_WORKGROUP_SIZE) {
tmp = s[i];
// increment counts
if (tmp < pivot)
sn[lfrom++] = tmp;
if (tmp > pivot)
sn[gfrom++] = tmp;
}
#pragma omp barrier
// Store the pivot value between the new sequences
for (i = start + ltsum + localid;i < end - gtsum; i += LQSORT_LOCAL_WORKGROUP_SIZE) {
d[i+d_offset] = pivot;
}
#pragma omp barrier
// if the sequence is shorter than SORT_THRESHOLD
// sort it using an alternative sort and place result in d
if (ltsum <= SORT_THRESHOLD) {
sort_threshold(sn, d+d_offset, start, start + ltsum, temp, localid);
} else {
PUSH(start, start + ltsum);
#pragma omp barrier
}
if (gtsum <= SORT_THRESHOLD) {
sort_threshold(sn, d+d_offset, end - gtsum, end, temp, localid);
} else {
PUSH(end - gtsum, end);
#pragma omp barrier
}
}
}
}
|
DRB091-threadprivate2-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
A file-scope variable used within a function called by a parallel region.
Use threadprivate to avoid data races.
This is the case for a variable referenced within a construct.
*/
#include <stdio.h>
#include <assert.h>
int sum0=0, sum1=0;
#pragma omp threadprivate(sum0)
int main()
{
int len=1000;
int i, sum=0;
#pragma omp parallel copyin(sum0)
{
#pragma omp for schedule(dynamic)
for (i=0;i<len;i++)
{
sum0=sum0+i;
}
#pragma omp critical
{
sum= sum+sum0;
}
}
/* reference calculation */
for (i=0;i<len;i++)
{
sum1=sum1+i;
}
printf("sum=%d; sum1=%d\n",sum,sum1);
assert(sum==sum1);
return 0;
}
|
sparse_msg3_setup_rap.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision$
***********************************************************************EHEADER*/
#include "_hypre_struct_ls.h"
/*--------------------------------------------------------------------------
* Macro to "change coordinates". This routine is written as though
* coarsening is being done in the z-direction. This macro is used to
* allow for coarsening to be done in the x- and y-directions also.
*--------------------------------------------------------------------------*/
/* MapIndex(in_index, cdir, out_index):
 * Writes a permuted copy of in_index into out_index so that code written
 * as if coarsening in the z-direction (dimension 2) works for any
 * coarsening direction cdir: component 2 of in_index lands in dimension
 * cdir of out_index, component 0 in dimension (cdir+1)%3, and component 1
 * in dimension (cdir+2)%3.
 * NOTE: cdir must be a modifiable lvalue -- the macro increments it three
 * times modulo 3, which leaves it with its original value afterwards. */
#define MapIndex(in_index, cdir, out_index) \
hypre_IndexD(out_index, cdir) = hypre_IndexD(in_index, 2); \
cdir = (cdir + 1) % 3; \
hypre_IndexD(out_index, cdir) = hypre_IndexD(in_index, 0); \
cdir = (cdir + 1) % 3; \
hypre_IndexD(out_index, cdir) = hypre_IndexD(in_index, 1); \
cdir = (cdir + 1) % 3;
/*--------------------------------------------------------------------------
* hypre_SparseMSG3CreateRAPOp
* Sets up new coarse grid operator structure.
*--------------------------------------------------------------------------*/
/*
 * Allocate and shape (but do not compute) the coarse-grid operator
 * RAP = R*A*P for 3D SparseMSG:
 *   - a 7-point fine-grid stencil in A produces a 19-point coarse stencil
 *     (the 27-point box minus its 8 corner offsets, selected by i*j*k == 0);
 *   - any larger fine stencil (19- or 27-point) produces the full
 *     27-point coarse stencil.
 * When A is stored symmetric, only (size + 1) / 2 stencil entries are
 * kept (the lower-triangular half plus diagonal in lexicographic order).
 *
 * cdir is the coarsening direction; MapIndex rotates each canonical
 * z-coarsening offset (i,j,k) into the frame implied by cdir (cdir itself
 * is returned unchanged, since the macro increments it three times mod 3).
 *
 * R and P are not referenced in this routine; only A's stencil/symmetry,
 * the coarse grid, and cdir determine the result.  Returns the newly
 * created StructMatrix with stencil attached, symmetry flag copied from
 * A, and one ghost layer per face; the caller fills in the coefficients.
 */
hypre_StructMatrix *
hypre_SparseMSG3CreateRAPOp( hypre_StructMatrix *R,
hypre_StructMatrix *A,
hypre_StructMatrix *P,
hypre_StructGrid *coarse_grid,
HYPRE_Int cdir )
{
hypre_StructMatrix *RAP;
hypre_Index *RAP_stencil_shape;
hypre_StructStencil *RAP_stencil;
HYPRE_Int RAP_stencil_size;
HYPRE_Int RAP_stencil_dim;
HYPRE_Int RAP_num_ghost[] = {1, 1, 1, 1, 1, 1}; /* one ghost layer per face */
hypre_StructStencil *A_stencil;
HYPRE_Int A_stencil_size;
hypre_Index index_temp;
HYPRE_Int k, j, i;
HYPRE_Int stencil_rank;
RAP_stencil_dim = 3;
A_stencil = hypre_StructMatrixStencil(A);
A_stencil_size = hypre_StructStencilSize(A_stencil);
/*-----------------------------------------------------------------------
* Define RAP_stencil
*-----------------------------------------------------------------------*/
stencil_rank = 0;
/*-----------------------------------------------------------------------
* non-symmetric case
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* 7-point fine grid stencil produces 19 point RAP
*
* Store all 27 elements except for the corners.
*
* For symmetric A, only store the lower triangular part, where
* lower triangular means the lower triangular part on the matrix
* in the standard lexicographic ordering.
*-----------------------------------------------------------------------*/
if( A_stencil_size == 7)
{
RAP_stencil_size = 19;
if (hypre_StructMatrixSymmetric(A))
{
/* symmetric storage: lower half plus diagonal -> (19 + 1) / 2 = 10 */
RAP_stencil_size = (RAP_stencil_size + 1) / 2;
}
RAP_stencil_shape = hypre_CTAlloc(hypre_Index, RAP_stencil_size);
for (k = -1; k < 2; k++)
{
for (j = -1; j < 2; j++)
{
for (i = -1; i < 2; i++)
{
/* i*j*k == 0 skips the 8 corner offsets; the lexicographic loop
order visits lower-triangular entries first, so the rank cap
truncates to the lower half in the symmetric case */
if ((i*j*k == 0) && (stencil_rank < RAP_stencil_size))
{
hypre_SetIndex3(index_temp,i,j,k);
MapIndex(index_temp, cdir,
RAP_stencil_shape[stencil_rank]);
stencil_rank++;
}
}
}
}
}
/*-----------------------------------------------------------------------
* 19 or 27 point fine grid stencil produces 27 point RAP
*
* Store all 27 elements
*
* For symmetric A, only store the lower triangular part, where
* lower triangular means the lower triangular part on the matrix
* in the standard lexicographic ordering.
*-----------------------------------------------------------------------*/
else
{
RAP_stencil_size = 27;
if (hypre_StructMatrixSymmetric(A))
{
/* symmetric storage: (27 + 1) / 2 = 14 entries */
RAP_stencil_size = (RAP_stencil_size + 1) / 2;
}
RAP_stencil_shape = hypre_CTAlloc(hypre_Index, RAP_stencil_size);
for (k = -1; k < 2; k++)
{
for (j = -1; j < 2; j++)
{
for (i = -1; i < 2; i++)
{
if (stencil_rank < RAP_stencil_size)
{
hypre_SetIndex3(index_temp,i,j,k);
MapIndex(index_temp, cdir,
RAP_stencil_shape[stencil_rank]);
stencil_rank++;
}
}
}
}
}
/* The stencil is reference-held by the matrix, so our handle can be
released right after creation. */
RAP_stencil = hypre_StructStencilCreate(RAP_stencil_dim, RAP_stencil_size,
RAP_stencil_shape);
RAP = hypre_StructMatrixCreate(hypre_StructMatrixComm(A),
coarse_grid, RAP_stencil);
hypre_StructStencilDestroy(RAP_stencil);
/*-----------------------------------------------------------------------
* Coarse operator is symmetric iff fine operator is
*-----------------------------------------------------------------------*/
hypre_StructMatrixSymmetric(RAP) = hypre_StructMatrixSymmetric(A);
/*-----------------------------------------------------------------------
* Set number of ghost points - one on each boundary
*-----------------------------------------------------------------------*/
hypre_StructMatrixSetNumGhost(RAP, RAP_num_ghost);
return RAP;
}
/*--------------------------------------------------------------------------
* Routines to build RAP. These routines are fairly general
* 1) No assumptions about symmetry of A
* 2) No assumption that R = transpose(P)
* 3) 7, 19 or 27-point fine grid A
*
* I am, however, assuming that the c-to-c interpolation is the identity.
*
* I've written a two routines - hypre_SparseMSG3BuildRAPSym to build the lower
* triangular part of RAP (including the diagonal) and
* hypre_SparseMSG3BuildRAPNoSym to build the upper triangular part of RAP
* (excluding the diagonal). So using symmetric storage, only the first
* routine would be called. With full storage both would need to be called.
*
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SparseMSG3BuildRAPSym( hypre_StructMatrix *A,
hypre_StructMatrix *P,
hypre_StructMatrix *R,
HYPRE_Int cdir,
hypre_Index cindex,
hypre_Index cstride,
hypre_Index stridePR,
hypre_StructMatrix *RAP )
{
hypre_Index index;
hypre_Index index_temp;
hypre_StructStencil *fine_stencil;
HYPRE_Int fine_stencil_size;
hypre_StructGrid *fgrid;
HYPRE_Int *fgrid_ids;
hypre_StructGrid *cgrid;
hypre_BoxArray *cgrid_boxes;
HYPRE_Int *cgrid_ids;
hypre_Box *cgrid_box;
hypre_IndexRef cstart;
hypre_Index stridec;
hypre_Index fstart;
hypre_IndexRef stridef;
hypre_Index Pstart;
hypre_Index loop_size;
HYPRE_Int fi, ci;
hypre_Box *A_dbox;
hypre_Box *P_dbox;
hypre_Box *R_dbox;
hypre_Box *RAP_dbox;
HYPRE_Real *pa, *pb;
HYPRE_Real *ra, *rb;
HYPRE_Real *a_cc, *a_cw, *a_ce, *a_cs, *a_cn;
HYPRE_Real *a_ac, *a_aw, *a_as;
HYPRE_Real *a_bc, *a_bw, *a_be, *a_bs, *a_bn;
HYPRE_Real *a_csw, *a_cse, *a_cnw, *a_cne;
HYPRE_Real *a_asw, *a_ase;
HYPRE_Real *a_bsw, *a_bse, *a_bnw, *a_bne;
HYPRE_Real *rap_cc, *rap_cw, *rap_cs;
HYPRE_Real *rap_bc, *rap_bw, *rap_be, *rap_bs, *rap_bn;
HYPRE_Real *rap_csw, *rap_cse;
HYPRE_Real *rap_bsw, *rap_bse, *rap_bnw, *rap_bne;
HYPRE_Int iA, iAm1, iAp1;
HYPRE_Int iAc;
HYPRE_Int iP, iP1;
HYPRE_Int iR;
HYPRE_Int zOffsetA;
HYPRE_Int xOffsetP;
HYPRE_Int yOffsetP;
HYPRE_Int zOffsetP;
HYPRE_Int ierr = 0;
fine_stencil = hypre_StructMatrixStencil(A);
fine_stencil_size = hypre_StructStencilSize(fine_stencil);
stridef = cstride;
hypre_SetIndex3(stridec, 1, 1, 1);
fgrid = hypre_StructMatrixGrid(A);
fgrid_ids = hypre_StructGridIDs(fgrid);
cgrid = hypre_StructMatrixGrid(RAP);
cgrid_boxes = hypre_StructGridBoxes(cgrid);
cgrid_ids = hypre_StructGridIDs(cgrid);
fi = 0;
hypre_ForBoxI(ci, cgrid_boxes)
{
while (fgrid_ids[fi] != cgrid_ids[ci])
{
fi++;
}
cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci);
cstart = hypre_BoxIMin(cgrid_box);
hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart);
hypre_StructMapCoarseToFine(cstart, cindex, stridePR, Pstart);
A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), fi);
P_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(P), fi);
R_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(R), fi);
RAP_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(RAP), ci);
/*-----------------------------------------------------------------
* Extract pointers for interpolation operator:
* pa is pointer for weight for f-point above c-point
* pb is pointer for weight for f-point below c-point
*-----------------------------------------------------------------*/
hypre_SetIndex3(index_temp,0,0,-1);
MapIndex(index_temp, cdir, index);
pa = hypre_StructMatrixExtractPointerByIndex(P, fi, index);
hypre_SetIndex3(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
pb = hypre_StructMatrixExtractPointerByIndex(P, fi, index) -
hypre_BoxOffsetDistance(P_dbox, index);
/*-----------------------------------------------------------------
* Extract pointers for restriction operator:
* ra is pointer for weight for f-point above c-point
* rb is pointer for weight for f-point below c-point
*-----------------------------------------------------------------*/
hypre_SetIndex3(index_temp,0,0,-1);
MapIndex(index_temp, cdir, index);
ra = hypre_StructMatrixExtractPointerByIndex(R, fi, index);
hypre_SetIndex3(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
rb = hypre_StructMatrixExtractPointerByIndex(R, fi, index) -
hypre_BoxOffsetDistance(R_dbox, index);
/*-----------------------------------------------------------------
* Extract pointers for 7-point fine grid operator:
*
* a_cc is pointer for center coefficient
* a_cw is pointer for west coefficient in same plane
* a_ce is pointer for east coefficient in same plane
* a_cs is pointer for south coefficient in same plane
* a_cn is pointer for north coefficient in same plane
* a_ac is pointer for center coefficient in plane above
* a_bc is pointer for center coefficient in plane below
*-----------------------------------------------------------------*/
hypre_SetIndex3(index_temp,0,0,0);
MapIndex(index_temp, cdir, index);
a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index_temp,-1,0,0);
MapIndex(index_temp, cdir, index);
a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index_temp,0,-1,0);
MapIndex(index_temp, cdir, index);
a_cs = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
a_cn = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
a_ac = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index_temp,0,0,-1);
MapIndex(index_temp, cdir, index);
a_bc = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
/*-----------------------------------------------------------------
* Extract additional pointers for 19-point fine grid operator:
*
* a_aw is pointer for west coefficient in plane above
* a_ae is pointer for east coefficient in plane above
* a_as is pointer for south coefficient in plane above
* a_an is pointer for north coefficient in plane above
* a_bw is pointer for west coefficient in plane below
* a_be is pointer for east coefficient in plane below
* a_bs is pointer for south coefficient in plane below
* a_bn is pointer for north coefficient in plane below
* a_csw is pointer for southwest coefficient in same plane
* a_cse is pointer for southeast coefficient in same plane
* a_cnw is pointer for northwest coefficient in same plane
* a_cne is pointer for northeast coefficient in same plane
*-----------------------------------------------------------------*/
if (fine_stencil_size > 7)
{
hypre_SetIndex3(index_temp,-1,0,1);
MapIndex(index_temp, cdir, index);
a_aw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index_temp,0,-1,1);
MapIndex(index_temp, cdir, index);
a_as = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index_temp,-1,0,-1);
MapIndex(index_temp, cdir, index);
a_bw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index_temp,1,0,-1);
MapIndex(index_temp, cdir, index);
a_be = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index_temp,0,-1,-1);
MapIndex(index_temp, cdir, index);
a_bs = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index_temp,0,1,-1);
MapIndex(index_temp, cdir, index);
a_bn = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index_temp,-1,-1,0);
MapIndex(index_temp, cdir, index);
a_csw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index_temp,1,-1,0);
MapIndex(index_temp, cdir, index);
a_cse = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index_temp,-1,1,0);
MapIndex(index_temp, cdir, index);
a_cnw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index_temp,1,1,0);
MapIndex(index_temp, cdir, index);
a_cne = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
}
/*-----------------------------------------------------------------
* Extract additional pointers for 27-point fine grid operator:
*
* a_asw is pointer for southwest coefficient in plane above
* a_ase is pointer for southeast coefficient in plane above
* a_anw is pointer for northwest coefficient in plane above
* a_ane is pointer for northeast coefficient in plane above
* a_bsw is pointer for southwest coefficient in plane below
* a_bse is pointer for southeast coefficient in plane below
* a_bnw is pointer for northwest coefficient in plane below
* a_bne is pointer for northeast coefficient in plane below
*-----------------------------------------------------------------*/
if (fine_stencil_size > 19)
{
hypre_SetIndex3(index_temp,-1,-1,1);
MapIndex(index_temp, cdir, index);
a_asw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index_temp,1,-1,1);
MapIndex(index_temp, cdir, index);
a_ase = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index_temp,-1,-1,-1);
MapIndex(index_temp, cdir, index);
a_bsw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index_temp,1,-1,-1);
MapIndex(index_temp, cdir, index);
a_bse = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index_temp,-1,1,-1);
MapIndex(index_temp, cdir, index);
a_bnw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index_temp,1,1,-1);
MapIndex(index_temp, cdir, index);
a_bne = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
}
/*-----------------------------------------------------------------
* Extract pointers for 19-point coarse grid operator:
*
* We build only the lower triangular part (plus diagonal).
*
* rap_cc is pointer for center coefficient (etc.)
*-----------------------------------------------------------------*/
hypre_SetIndex3(index_temp,0,0,0);
MapIndex(index_temp, cdir, index);
rap_cc = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index_temp,-1,0,0);
MapIndex(index_temp, cdir, index);
rap_cw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index_temp,0,-1,0);
MapIndex(index_temp, cdir, index);
rap_cs = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index_temp,0,0,-1);
MapIndex(index_temp, cdir, index);
rap_bc = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index_temp,-1,0,-1);
MapIndex(index_temp, cdir, index);
rap_bw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index_temp,1,0,-1);
MapIndex(index_temp, cdir, index);
rap_be = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index_temp,0,-1,-1);
MapIndex(index_temp, cdir, index);
rap_bs = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index_temp,0,1,-1);
MapIndex(index_temp, cdir, index);
rap_bn = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index_temp,-1,-1,0);
MapIndex(index_temp, cdir, index);
rap_csw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index_temp,1,-1,0);
MapIndex(index_temp, cdir, index);
rap_cse = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
/*-----------------------------------------------------------------
* Extract additional pointers for 27-point coarse grid operator:
*
* A 27-point coarse grid operator is produced when the fine grid
* stencil is 19 or 27 point.
*
* We build only the lower triangular part.
*
* rap_csw is pointer for southwest coefficient in same plane (etc.)
*-----------------------------------------------------------------*/
if (fine_stencil_size > 7)
{
hypre_SetIndex3(index_temp,-1,-1,-1);
MapIndex(index_temp, cdir, index);
rap_bsw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index_temp,1,-1,-1);
MapIndex(index_temp, cdir, index);
rap_bse = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index_temp,-1,1,-1);
MapIndex(index_temp, cdir, index);
rap_bnw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index_temp,1,1,-1);
MapIndex(index_temp, cdir, index);
rap_bne = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
}
/*-----------------------------------------------------------------
* Define offsets for fine grid stencil and interpolation
*
* In the BoxLoop below I assume iA and iP refer to data associated
* with the point which we are building the stencil for. The below
* Offsets are used in refering to data associated with other points.
*-----------------------------------------------------------------*/
hypre_SetIndex3(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
zOffsetA = hypre_BoxOffsetDistance(A_dbox,index);
zOffsetP = hypre_BoxOffsetDistance(P_dbox,index);
hypre_SetIndex3(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
yOffsetP = hypre_BoxOffsetDistance(P_dbox,index);
hypre_SetIndex3(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
xOffsetP = hypre_BoxOffsetDistance(P_dbox,index);
/*--------------------------------------------------------------------
* Switch statement to direct control to apropriate BoxLoop depending
* on stencil size. Default is full 27-point.
*-----------------------------------------------------------------*/
switch (fine_stencil_size)
{
/*--------------------------------------------------------------
* Loop for symmetric 7-point fine grid operator; produces a
* symmetric 19-point coarse grid operator. We calculate only the
* lower triangular stencil entries: (below-south, below-west,
* below-center, below-east, below-north, center-south,
* center-west, and center-center).
*--------------------------------------------------------------*/
case 7:
hypre_BoxGetSize(cgrid_box, loop_size);
hypre_BoxLoop4Begin(hypre_StructMatrixNDim(A), loop_size,
P_dbox, Pstart, stridePR, iP,
R_dbox, Pstart, stridePR, iR,
A_dbox, fstart, stridef, iA,
RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop4For(iP, iR, iA, iAc)
{
iAm1 = iA - zOffsetA;
iAp1 = iA + zOffsetA;
iP1 = iP - zOffsetP - yOffsetP;
rap_bs[iAc] = rb[iR] * a_cs[iAm1] * pa[iP1];
iP1 = iP - zOffsetP - xOffsetP;
rap_bw[iAc] = rb[iR] * a_cw[iAm1] * pa[iP1];
iP1 = iP - zOffsetP;
rap_bc[iAc] = a_bc[iA] * pa[iP1]
+ rb[iR] * a_cc[iAm1] * pa[iP1]
+ rb[iR] * a_bc[iAm1];
iP1 = iP - zOffsetP + xOffsetP;
rap_be[iAc] = rb[iR] * a_ce[iAm1] * pa[iP1];
iP1 = iP - zOffsetP + yOffsetP;
rap_bn[iAc] = rb[iR] * a_cn[iAm1] * pa[iP1];
iP1 = iP - yOffsetP;
rap_cs[iAc] = a_cs[iA]
+ rb[iR] * a_cs[iAm1] * pb[iP1]
+ ra[iR] * a_cs[iAp1] * pa[iP1];
iP1 = iP - xOffsetP;
rap_cw[iAc] = a_cw[iA]
+ rb[iR] * a_cw[iAm1] * pb[iP1]
+ ra[iR] * a_cw[iAp1] * pa[iP1];
rap_csw[iAc] = 0.0;
rap_cse[iAc] = 0.0;
rap_cc[iAc] = a_cc[iA]
+ rb[iR] * a_cc[iAm1] * pb[iP]
+ ra[iR] * a_cc[iAp1] * pa[iP]
+ rb[iR] * a_ac[iAm1]
+ ra[iR] * a_bc[iAp1]
+ a_bc[iA] * pb[iP]
+ a_ac[iA] * pa[iP];
}
hypre_BoxLoop4End(iP, iR, iA, iAc);
break;
/*--------------------------------------------------------------
* Loop for symmetric 19-point fine grid operator; produces a
* symmetric 27-point coarse grid operator. We calculate only the
* lower triangular stencil entries: (below-southwest, below-south,
* below-southeast, below-west, below-center, below-east,
* below-northwest, below-north, below-northeast, center-southwest,
* center-south, center-southeast, center-west, and center-center).
*--------------------------------------------------------------*/
case 19:
hypre_BoxGetSize(cgrid_box, loop_size);
hypre_BoxLoop4Begin(hypre_StructMatrixNDim(A), loop_size,
P_dbox, Pstart, stridePR, iP,
R_dbox, Pstart, stridePR, iR,
A_dbox, fstart, stridef, iA,
RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop4For(iP, iR, iA, iAc)
{
iAm1 = iA - zOffsetA;
iAp1 = iA + zOffsetA;
iP1 = iP - zOffsetP - yOffsetP - xOffsetP;
rap_bsw[iAc] = rb[iR] * a_csw[iAm1] * pa[iP1];
iP1 = iP - zOffsetP - yOffsetP;
rap_bs[iAc] = rb[iR] * a_cs[iAm1] * pa[iP1]
+ rb[iR] * a_bs[iAm1]
+ a_bs[iA] * pa[iP1];
iP1 = iP - zOffsetP - yOffsetP + xOffsetP;
rap_bse[iAc] = rb[iR] * a_cse[iAm1] * pa[iP1];
iP1 = iP - zOffsetP - xOffsetP;
rap_bw[iAc] = rb[iR] * a_cw[iAm1] * pa[iP1]
+ rb[iR] * a_bw[iAm1]
+ a_bw[iA] * pa[iP1];
iP1 = iP - zOffsetP;
rap_bc[iAc] = a_bc[iA] * pa[iP1]
+ rb[iR] * a_cc[iAm1] * pa[iP1]
+ rb[iR] * a_bc[iAm1];
iP1 = iP - zOffsetP + xOffsetP;
rap_be[iAc] = rb[iR] * a_ce[iAm1] * pa[iP1]
+ rb[iR] * a_be[iAm1]
+ a_be[iA] * pa[iP1];
iP1 = iP - zOffsetP + yOffsetP - xOffsetP;
rap_bnw[iAc] = rb[iR] * a_cnw[iAm1] * pa[iP1];
iP1 = iP - zOffsetP + yOffsetP;
rap_bn[iAc] = rb[iR] * a_cn[iAm1] * pa[iP1]
+ rb[iR] * a_bn[iAm1]
+ a_bn[iA] * pa[iP1];
iP1 = iP - zOffsetP + yOffsetP + xOffsetP;
rap_bne[iAc] = rb[iR] * a_cne[iAm1] * pa[iP1];
iP1 = iP - yOffsetP - xOffsetP;
rap_csw[iAc] = a_csw[iA]
+ rb[iR] * a_csw[iAm1] * pb[iP1]
+ ra[iR] * a_csw[iAp1] * pa[iP1];
iP1 = iP - yOffsetP;
rap_cs[iAc] = a_cs[iA]
+ rb[iR] * a_cs[iAm1] * pb[iP1]
+ ra[iR] * a_cs[iAp1] * pa[iP1]
+ a_bs[iA] * pb[iP1]
+ a_as[iA] * pa[iP1]
+ rb[iR] * a_as[iAm1]
+ ra[iR] * a_bs[iAp1];
iP1 = iP - yOffsetP + xOffsetP;
rap_cse[iAc] = a_cse[iA]
+ rb[iR] * a_cse[iAm1] * pb[iP1]
+ ra[iR] * a_cse[iAp1] * pa[iP1];
iP1 = iP - xOffsetP;
rap_cw[iAc] = a_cw[iA]
+ rb[iR] * a_cw[iAm1] * pb[iP1]
+ ra[iR] * a_cw[iAp1] * pa[iP1]
+ a_bw[iA] * pb[iP1]
+ a_aw[iA] * pa[iP1]
+ rb[iR] * a_aw[iAm1]
+ ra[iR] * a_bw[iAp1];
rap_cc[iAc] = a_cc[iA]
+ rb[iR] * a_cc[iAm1] * pb[iP]
+ ra[iR] * a_cc[iAp1] * pa[iP]
+ rb[iR] * a_ac[iAm1]
+ ra[iR] * a_bc[iAp1]
+ a_bc[iA] * pb[iP]
+ a_ac[iA] * pa[iP];
}
hypre_BoxLoop4End(iP, iR, iA, iAc);
break;
/*--------------------------------------------------------------
* Loop for symmetric 27-point fine grid operator; produces a
* symmetric 27-point coarse grid operator. We calculate only the
* lower triangular stencil entries: (below-southwest, below-south,
* below-southeast, below-west, below-center, below-east,
* below-northwest, below-north, below-northeast, center-southwest,
* center-south, center-southeast, center-west, and center-center).
*--------------------------------------------------------------*/
default:
hypre_BoxGetSize(cgrid_box, loop_size);
hypre_BoxLoop4Begin(hypre_StructMatrixNDim(A), loop_size,
P_dbox, Pstart, stridePR, iP,
R_dbox, Pstart, stridePR, iR,
A_dbox, fstart, stridef, iA,
RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop4For(iP, iR, iA, iAc)
{
iAm1 = iA - zOffsetA;
iAp1 = iA + zOffsetA;
iP1 = iP - zOffsetP - yOffsetP - xOffsetP;
rap_bsw[iAc] = rb[iR] * a_csw[iAm1] * pa[iP1]
+ rb[iR] * a_bsw[iAm1]
+ a_bsw[iA] * pa[iP1];
iP1 = iP - zOffsetP - yOffsetP;
rap_bs[iAc] = rb[iR] * a_cs[iAm1] * pa[iP1]
+ rb[iR] * a_bs[iAm1]
+ a_bs[iA] * pa[iP1];
iP1 = iP - zOffsetP - yOffsetP + xOffsetP;
rap_bse[iAc] = rb[iR] * a_cse[iAm1] * pa[iP1]
+ rb[iR] * a_bse[iAm1]
+ a_bse[iA] * pa[iP1];
iP1 = iP - zOffsetP - xOffsetP;
rap_bw[iAc] = rb[iR] * a_cw[iAm1] * pa[iP1]
+ rb[iR] * a_bw[iAm1]
+ a_bw[iA] * pa[iP1];
iP1 = iP - zOffsetP;
rap_bc[iAc] = a_bc[iA] * pa[iP1]
+ rb[iR] * a_cc[iAm1] * pa[iP1]
+ rb[iR] * a_bc[iAm1];
iP1 = iP - zOffsetP + xOffsetP;
rap_be[iAc] = rb[iR] * a_ce[iAm1] * pa[iP1]
+ rb[iR] * a_be[iAm1]
+ a_be[iA] * pa[iP1];
iP1 = iP - zOffsetP + yOffsetP - xOffsetP;
rap_bnw[iAc] = rb[iR] * a_cnw[iAm1] * pa[iP1]
+ rb[iR] * a_bnw[iAm1]
+ a_bnw[iA] * pa[iP1];
iP1 = iP - zOffsetP + yOffsetP;
rap_bn[iAc] = rb[iR] * a_cn[iAm1] * pa[iP1]
+ rb[iR] * a_bn[iAm1]
+ a_bn[iA] * pa[iP1];
iP1 = iP - zOffsetP + yOffsetP + xOffsetP;
rap_bne[iAc] = rb[iR] * a_cne[iAm1] * pa[iP1]
+ rb[iR] * a_bne[iAm1]
+ a_bne[iA] * pa[iP1];
iP1 = iP - yOffsetP - xOffsetP;
rap_csw[iAc] = a_csw[iA]
+ rb[iR] * a_csw[iAm1] * pb[iP1]
+ ra[iR] * a_csw[iAp1] * pa[iP1]
+ a_bsw[iA] * pb[iP1]
+ a_asw[iA] * pa[iP1]
+ rb[iR] * a_asw[iAm1]
+ ra[iR] * a_bsw[iAp1];
iP1 = iP - yOffsetP;
rap_cs[iAc] = a_cs[iA]
+ rb[iR] * a_cs[iAm1] * pb[iP1]
+ ra[iR] * a_cs[iAp1] * pa[iP1]
+ a_bs[iA] * pb[iP1]
+ a_as[iA] * pa[iP1]
+ rb[iR] * a_as[iAm1]
+ ra[iR] * a_bs[iAp1];
iP1 = iP - yOffsetP + xOffsetP;
rap_cse[iAc] = a_cse[iA]
+ rb[iR] * a_cse[iAm1] * pb[iP1]
+ ra[iR] * a_cse[iAp1] * pa[iP1]
+ a_bse[iA] * pb[iP1]
+ a_ase[iA] * pa[iP1]
+ rb[iR] * a_ase[iAm1]
+ ra[iR] * a_bse[iAp1];
iP1 = iP - xOffsetP;
rap_cw[iAc] = a_cw[iA]
+ rb[iR] * a_cw[iAm1] * pb[iP1]
+ ra[iR] * a_cw[iAp1] * pa[iP1]
+ a_bw[iA] * pb[iP1]
+ a_aw[iA] * pa[iP1]
+ rb[iR] * a_aw[iAm1]
+ ra[iR] * a_bw[iAp1];
rap_cc[iAc] = a_cc[iA]
+ rb[iR] * a_cc[iAm1] * pb[iP]
+ ra[iR] * a_cc[iAp1] * pa[iP]
+ rb[iR] * a_ac[iAm1]
+ ra[iR] * a_bc[iAp1]
+ a_bc[iA] * pb[iP]
+ a_ac[iA] * pa[iP];
}
hypre_BoxLoop4End(iP, iR, iA, iAc);
break;
} /* end switch statement */
} /* end ForBoxI */
return ierr;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
 * Builds the non-symmetric ("upper triangular", per the case comments
 * below) stencil entries of the Galerkin coarse-grid operator RAP for the
 * 3D SparseMSG scheme, semicoarsening in direction cdir.  The switch on
 * fine_stencil_size dispatches to a 7-, 19-, or 27-point fine-grid loop;
 * anything other than 7 or 19 falls through to the full 27-point loop.
 * Returns 0 (ierr is never set to a nonzero value in this routine).
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SparseMSG3BuildRAPNoSym( hypre_StructMatrix *A,
hypre_StructMatrix *P,
hypre_StructMatrix *R,
HYPRE_Int cdir,
hypre_Index cindex,
hypre_Index cstride,
hypre_Index stridePR,
hypre_StructMatrix *RAP )
{
hypre_Index index;
hypre_Index index_temp;
hypre_StructStencil *fine_stencil;
HYPRE_Int fine_stencil_size;
hypre_StructGrid *fgrid;
HYPRE_Int *fgrid_ids;
hypre_StructGrid *cgrid;
hypre_BoxArray *cgrid_boxes;
HYPRE_Int *cgrid_ids;
hypre_Box *cgrid_box;
hypre_IndexRef cstart;
hypre_Index stridec;
hypre_Index fstart;
hypre_IndexRef stridef;
hypre_Index Pstart;
hypre_Index loop_size;
HYPRE_Int fi, ci;
hypre_Box *A_dbox;
hypre_Box *P_dbox;
hypre_Box *R_dbox;
hypre_Box *RAP_dbox;
HYPRE_Real *pa, *pb;
HYPRE_Real *ra, *rb;
HYPRE_Real *a_cc, *a_cw, *a_ce, *a_cs, *a_cn;
HYPRE_Real *a_ac, *a_aw, *a_ae, *a_as, *a_an;
HYPRE_Real *a_be, *a_bn;
HYPRE_Real *a_csw, *a_cse, *a_cnw, *a_cne;
HYPRE_Real *a_asw, *a_ase, *a_anw, *a_ane;
HYPRE_Real *a_bnw, *a_bne;
HYPRE_Real *rap_ce, *rap_cn;
HYPRE_Real *rap_ac, *rap_aw, *rap_ae, *rap_as, *rap_an;
HYPRE_Real *rap_cnw, *rap_cne;
HYPRE_Real *rap_asw, *rap_ase, *rap_anw, *rap_ane;
HYPRE_Int iA, iAm1, iAp1;
HYPRE_Int iAc;
HYPRE_Int iP, iP1;
HYPRE_Int iR;
HYPRE_Int zOffsetA;
HYPRE_Int xOffsetP;
HYPRE_Int yOffsetP;
HYPRE_Int zOffsetP;
HYPRE_Int ierr = 0;
fine_stencil = hypre_StructMatrixStencil(A);
fine_stencil_size = hypre_StructStencilSize(fine_stencil);
/* fine-grid stride is the coarsening stride; the coarse stride is unit */
stridef = cstride;
hypre_SetIndex3(stridec, 1, 1, 1);
fgrid = hypre_StructMatrixGrid(A);
fgrid_ids = hypre_StructGridIDs(fgrid);
cgrid = hypre_StructMatrixGrid(RAP);
cgrid_boxes = hypre_StructGridBoxes(cgrid);
cgrid_ids = hypre_StructGridIDs(cgrid);
fi = 0;
hypre_ForBoxI(ci, cgrid_boxes)
{
/* advance fi until the fine box id matches this coarse box id */
while (fgrid_ids[fi] != cgrid_ids[ci])
{
fi++;
}
cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci);
cstart = hypre_BoxIMin(cgrid_box);
/* map the coarse box origin back to fine-grid space for A (cstride)
   and to P/R index space (stridePR) */
hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart);
hypre_StructMapCoarseToFine(cstart, cindex, stridePR, Pstart);
A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), fi);
P_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(P), fi);
R_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(R), fi);
RAP_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(RAP), ci);
/*-----------------------------------------------------------------
 * Extract pointers for interpolation operator:
 * pa is pointer for weight for f-point above c-point
 * pb is pointer for weight for f-point below c-point
 *-----------------------------------------------------------------*/
hypre_SetIndex3(index_temp,0,0,-1);
MapIndex(index_temp, cdir, index);
pa = hypre_StructMatrixExtractPointerByIndex(P, fi, index);
hypre_SetIndex3(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
/* NOTE(review): the offset subtraction appears to align pb so it can be
   indexed at the same iP as pa in the loops below -- standard hypre
   convention; confirm against hypre's other RAP builders. */
pb = hypre_StructMatrixExtractPointerByIndex(P, fi, index) -
hypre_BoxOffsetDistance(P_dbox, index);
/*-----------------------------------------------------------------
 * Extract pointers for restriction operator:
 * ra is pointer for weight for f-point above c-point
 * rb is pointer for weight for f-point below c-point
 *-----------------------------------------------------------------*/
hypre_SetIndex3(index_temp,0,0,-1);
MapIndex(index_temp, cdir, index);
ra = hypre_StructMatrixExtractPointerByIndex(R, fi, index);
hypre_SetIndex3(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
/* same shifted-pointer convention as pb above */
rb = hypre_StructMatrixExtractPointerByIndex(R, fi, index) -
hypre_BoxOffsetDistance(R_dbox, index);
/*-----------------------------------------------------------------
 * Extract pointers for 7-point fine grid operator:
 *
 * a_cc is pointer for center coefficient
 * a_cw is pointer for west coefficient in same plane
 * a_ce is pointer for east coefficient in same plane
 * a_cs is pointer for south coefficient in same plane
 * a_cn is pointer for north coefficient in same plane
 * a_ac is pointer for center coefficient in plane above
 * a_bc is pointer for center coefficient in plane below
 *-----------------------------------------------------------------*/
hypre_SetIndex3(index_temp,0,0,0);
MapIndex(index_temp, cdir, index);
a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index_temp,-1,0,0);
MapIndex(index_temp, cdir, index);
a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index_temp,0,-1,0);
MapIndex(index_temp, cdir, index);
a_cs = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
a_cn = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
a_ac = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
/*-----------------------------------------------------------------
 * Extract additional pointers for 19-point fine grid operator:
 *
 * a_aw is pointer for west coefficient in plane above
 * a_ae is pointer for east coefficient in plane above
 * a_as is pointer for south coefficient in plane above
 * a_an is pointer for north coefficient in plane above
 * a_bw is pointer for west coefficient in plane below
 * a_be is pointer for east coefficient in plane below
 * a_bs is pointer for south coefficient in plane below
 * a_bn is pointer for north coefficient in plane below
 * a_csw is pointer for southwest coefficient in same plane
 * a_cse is pointer for southeast coefficient in same plane
 * a_cnw is pointer for northwest coefficient in same plane
 * a_cne is pointer for northeast coefficient in same plane
 *-----------------------------------------------------------------*/
if (fine_stencil_size > 7)
{
hypre_SetIndex3(index_temp,-1,0,1);
MapIndex(index_temp, cdir, index);
a_aw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index_temp,1,0,1);
MapIndex(index_temp, cdir, index);
a_ae = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index_temp,0,-1,1);
MapIndex(index_temp, cdir, index);
a_as = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index_temp,0,1,1);
MapIndex(index_temp, cdir, index);
a_an = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index_temp,1,0,-1);
MapIndex(index_temp, cdir, index);
a_be = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index_temp,0,1,-1);
MapIndex(index_temp, cdir, index);
a_bn = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index_temp,-1,-1,0);
MapIndex(index_temp, cdir, index);
a_csw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index_temp,1,-1,0);
MapIndex(index_temp, cdir, index);
a_cse = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index_temp,-1,1,0);
MapIndex(index_temp, cdir, index);
a_cnw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index_temp,1,1,0);
MapIndex(index_temp, cdir, index);
a_cne = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
}
/*-----------------------------------------------------------------
 * Extract additional pointers for 27-point fine grid operator:
 *
 * a_asw is pointer for southwest coefficient in plane above
 * a_ase is pointer for southeast coefficient in plane above
 * a_anw is pointer for northwest coefficient in plane above
 * a_ane is pointer for northeast coefficient in plane above
 * a_bsw is pointer for southwest coefficient in plane below
 * a_bse is pointer for southeast coefficient in plane below
 * a_bnw is pointer for northwest coefficient in plane below
 * a_bne is pointer for northeast coefficient in plane below
 *-----------------------------------------------------------------*/
if (fine_stencil_size > 19)
{
hypre_SetIndex3(index_temp,-1,-1,1);
MapIndex(index_temp, cdir, index);
a_asw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index_temp,1,-1,1);
MapIndex(index_temp, cdir, index);
a_ase = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index_temp,-1,1,1);
MapIndex(index_temp, cdir, index);
a_anw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index_temp,1,1,1);
MapIndex(index_temp, cdir, index);
a_ane = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index_temp,-1,1,-1);
MapIndex(index_temp, cdir, index);
a_bnw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index_temp,1,1,-1);
MapIndex(index_temp, cdir, index);
a_bne = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
}
/*-----------------------------------------------------------------
 * Extract pointers for 19-point coarse grid operator:
 *
 * We build only the upper triangular part (excluding diagonal).
 *
 * rap_ce is pointer for east coefficient in same plane (etc.)
 *-----------------------------------------------------------------*/
hypre_SetIndex3(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
rap_ce = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
rap_cn = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
rap_ac = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index_temp,-1,0,1);
MapIndex(index_temp, cdir, index);
rap_aw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index_temp,1,0,1);
MapIndex(index_temp, cdir, index);
rap_ae = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index_temp,0,-1,1);
MapIndex(index_temp, cdir, index);
rap_as = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index_temp,0,1,1);
MapIndex(index_temp, cdir, index);
rap_an = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index_temp,-1,1,0);
MapIndex(index_temp, cdir, index);
rap_cnw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index_temp,1,1,0);
MapIndex(index_temp, cdir, index);
rap_cne = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
/*-----------------------------------------------------------------
 * Extract additional pointers for 27-point coarse grid operator:
 *
 * A 27-point coarse grid operator is produced when the fine grid
 * stencil is 19 or 27 point.
 *
 * We build only the upper triangular part.
 *
 * rap_cnw is pointer for northwest coefficient in same plane (etc.)
 *-----------------------------------------------------------------*/
if (fine_stencil_size > 7)
{
hypre_SetIndex3(index_temp,-1,-1,1);
MapIndex(index_temp, cdir, index);
rap_asw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index_temp,1,-1,1);
MapIndex(index_temp, cdir, index);
rap_ase = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index_temp,-1,1,1);
MapIndex(index_temp, cdir, index);
rap_anw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index_temp,1,1,1);
MapIndex(index_temp, cdir, index);
rap_ane = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
}
/*-----------------------------------------------------------------
 * Define offsets for fine grid stencil and interpolation
 *
 * In the BoxLoop below I assume iA and iP refer to data associated
 * with the point which we are building the stencil for. The below
 * Offsets are used in refering to data associated with other points.
 *-----------------------------------------------------------------*/
hypre_SetIndex3(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
zOffsetA = hypre_BoxOffsetDistance(A_dbox,index);
zOffsetP = hypre_BoxOffsetDistance(P_dbox,index);
hypre_SetIndex3(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
yOffsetP = hypre_BoxOffsetDistance(P_dbox,index);
hypre_SetIndex3(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
xOffsetP = hypre_BoxOffsetDistance(P_dbox,index);
/*-----------------------------------------------------------------
 * Switch statement to direct control to apropriate BoxLoop depending
 * on stencil size. Default is full 27-point.
 *-----------------------------------------------------------------*/
switch (fine_stencil_size)
{
/*--------------------------------------------------------------
 * Loop for 7-point fine grid operator; produces upper triangular
 * part of 19-point coarse grid operator. stencil entries:
 * (above-north, above-east, above-center, above-west,
 * above-south, center-north, and center-east).
 *--------------------------------------------------------------*/
case 7:
hypre_BoxGetSize(cgrid_box, loop_size);
hypre_BoxLoop4Begin(hypre_StructMatrixNDim(A), loop_size,
P_dbox, Pstart, stridePR, iP,
R_dbox, Pstart, stridePR, iR,
A_dbox, fstart, stridef, iA,
RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop4For(iP, iR, iA, iAc)
{
iAm1 = iA - zOffsetA;
iAp1 = iA + zOffsetA;
iP1 = iP + zOffsetP + yOffsetP;
rap_an[iAc] = ra[iR] * a_cn[iAp1] * pb[iP1];
iP1 = iP + zOffsetP + xOffsetP;
rap_ae[iAc] = ra[iR] * a_ce[iAp1] * pb[iP1];
iP1 = iP + zOffsetP;
rap_ac[iAc] = a_ac[iA] * pb[iP1]
+ ra[iR] * a_cc[iAp1] * pb[iP1]
+ ra[iR] * a_ac[iAp1];
iP1 = iP + zOffsetP - xOffsetP;
rap_aw[iAc] = ra[iR] * a_cw[iAp1] * pb[iP1];
iP1 = iP + zOffsetP - yOffsetP;
rap_as[iAc] = ra[iR] * a_cs[iAp1] * pb[iP1];
iP1 = iP + yOffsetP;
rap_cn[iAc] = a_cn[iA]
+ rb[iR] * a_cn[iAm1] * pb[iP1]
+ ra[iR] * a_cn[iAp1] * pa[iP1];
iP1 = iP + xOffsetP;
rap_ce[iAc] = a_ce[iA]
+ rb[iR] * a_ce[iAm1] * pb[iP1]
+ ra[iR] * a_ce[iAp1] * pa[iP1];
/* entries absent from a 7-point fine operator are explicitly zeroed */
rap_cnw[iAc] = 0.0;
rap_cne[iAc] = 0.0;
}
hypre_BoxLoop4End(iP, iR, iA, iAc);
break;
/*--------------------------------------------------------------
 * Loop for 19-point fine grid operator; produces upper triangular
 * part of 27-point coarse grid operator. stencil entries:
 * (above-northeast, above-north, above-northwest, above-east,
 * above-center, above-west, above-southeast, above-south,
 * above-southwest, center-northeast, center-north,
 * center-northwest, and center-east).
 *--------------------------------------------------------------*/
case 19:
hypre_BoxGetSize(cgrid_box, loop_size);
hypre_BoxLoop4Begin(hypre_StructMatrixNDim(A), loop_size,
P_dbox, Pstart, stridePR, iP,
R_dbox, Pstart, stridePR, iR,
A_dbox, fstart, stridef, iA,
RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop4For(iP, iR, iA, iAc)
{
iAm1 = iA - zOffsetA;
iAp1 = iA + zOffsetA;
iP1 = iP + zOffsetP + yOffsetP + xOffsetP;
rap_ane[iAc] = ra[iR] * a_cne[iAp1] * pb[iP1];
iP1 = iP + zOffsetP + yOffsetP;
rap_an[iAc] = ra[iR] * a_cn[iAp1] * pb[iP1]
+ ra[iR] * a_an[iAp1]
+ a_an[iA] * pb[iP1];
iP1 = iP + zOffsetP + yOffsetP - xOffsetP;
rap_anw[iAc] = ra[iR] * a_cnw[iAp1] * pb[iP1];
iP1 = iP + zOffsetP + xOffsetP;
rap_ae[iAc] = ra[iR] * a_ce[iAp1] * pb[iP1]
+ ra[iR] * a_ae[iAp1]
+ a_ae[iA] * pb[iP1];
iP1 = iP + zOffsetP;
rap_ac[iAc] = a_ac[iA] * pb[iP1]
+ ra[iR] * a_cc[iAp1] * pb[iP1]
+ ra[iR] * a_ac[iAp1];
iP1 = iP + zOffsetP - xOffsetP;
rap_aw[iAc] = ra[iR] * a_cw[iAp1] * pb[iP1]
+ ra[iR] * a_aw[iAp1]
+ a_aw[iA] * pb[iP1];
iP1 = iP + zOffsetP - yOffsetP + xOffsetP;
rap_ase[iAc] = ra[iR] * a_cse[iAp1] * pb[iP1];
iP1 = iP + zOffsetP - yOffsetP;
rap_as[iAc] = ra[iR] * a_cs[iAp1] * pb[iP1]
+ ra[iR] * a_as[iAp1]
+ a_as[iA] * pb[iP1];
iP1 = iP + zOffsetP - yOffsetP - xOffsetP;
rap_asw[iAc] = ra[iR] * a_csw[iAp1] * pb[iP1];
iP1 = iP + yOffsetP + xOffsetP;
rap_cne[iAc] = a_cne[iA]
+ rb[iR] * a_cne[iAm1] * pb[iP1]
+ ra[iR] * a_cne[iAp1] * pa[iP1];
iP1 = iP + yOffsetP;
rap_cn[iAc] = a_cn[iA]
+ rb[iR] * a_cn[iAm1] * pb[iP1]
+ ra[iR] * a_cn[iAp1] * pa[iP1]
+ a_bn[iA] * pb[iP1]
+ a_an[iA] * pa[iP1]
+ rb[iR] * a_an[iAm1]
+ ra[iR] * a_bn[iAp1];
iP1 = iP + yOffsetP - xOffsetP;
rap_cnw[iAc] = a_cnw[iA]
+ rb[iR] * a_cnw[iAm1] * pb[iP1]
+ ra[iR] * a_cnw[iAp1] * pa[iP1];
iP1 = iP + xOffsetP;
rap_ce[iAc] = a_ce[iA]
+ rb[iR] * a_ce[iAm1] * pb[iP1]
+ ra[iR] * a_ce[iAp1] * pa[iP1]
+ a_be[iA] * pb[iP1]
+ a_ae[iA] * pa[iP1]
+ rb[iR] * a_ae[iAm1]
+ ra[iR] * a_be[iAp1];
}
hypre_BoxLoop4End(iP, iR, iA, iAc);
break;
/*--------------------------------------------------------------
 * Loop for 27-point fine grid operator; produces upper triangular
 * part of 27-point coarse grid operator. stencil entries:
 * (above-northeast, above-north, above-northwest, above-east,
 * above-center, above-west, above-southeast, above-south,
 * above-southwest, center-northeast, center-north,
 * center-northwest, and center-east).
 *--------------------------------------------------------------*/
default:
hypre_BoxGetSize(cgrid_box, loop_size);
hypre_BoxLoop4Begin(hypre_StructMatrixNDim(A), loop_size,
P_dbox, Pstart, stridePR, iP,
R_dbox, Pstart, stridePR, iR,
A_dbox, fstart, stridef, iA,
RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop4For(iP, iR, iA, iAc)
{
iAm1 = iA - zOffsetA;
iAp1 = iA + zOffsetA;
iP1 = iP + zOffsetP + yOffsetP + xOffsetP;
rap_ane[iAc] = ra[iR] * a_cne[iAp1] * pb[iP1]
+ ra[iR] * a_ane[iAp1]
+ a_ane[iA] * pb[iP1];
iP1 = iP + zOffsetP + yOffsetP;
rap_an[iAc] = ra[iR] * a_cn[iAp1] * pb[iP1]
+ ra[iR] * a_an[iAp1]
+ a_an[iA] * pb[iP1];
iP1 = iP + zOffsetP + yOffsetP - xOffsetP;
rap_anw[iAc] = ra[iR] * a_cnw[iAp1] * pb[iP1]
+ ra[iR] * a_anw[iAp1]
+ a_anw[iA] * pb[iP1];
iP1 = iP + zOffsetP + xOffsetP;
rap_ae[iAc] = ra[iR] * a_ce[iAp1] * pb[iP1]
+ ra[iR] * a_ae[iAp1]
+ a_ae[iA] * pb[iP1];
iP1 = iP + zOffsetP;
rap_ac[iAc] = a_ac[iA] * pb[iP1]
+ ra[iR] * a_cc[iAp1] * pb[iP1]
+ ra[iR] * a_ac[iAp1];
iP1 = iP + zOffsetP - xOffsetP;
rap_aw[iAc] = ra[iR] * a_cw[iAp1] * pb[iP1]
+ ra[iR] * a_aw[iAp1]
+ a_aw[iA] * pb[iP1];
iP1 = iP + zOffsetP - yOffsetP + xOffsetP;
rap_ase[iAc] = ra[iR] * a_cse[iAp1] * pb[iP1]
+ ra[iR] * a_ase[iAp1]
+ a_ase[iA] * pb[iP1];
iP1 = iP + zOffsetP - yOffsetP;
rap_as[iAc] = ra[iR] * a_cs[iAp1] * pb[iP1]
+ ra[iR] * a_as[iAp1]
+ a_as[iA] * pb[iP1];
iP1 = iP + zOffsetP - yOffsetP - xOffsetP;
rap_asw[iAc] = ra[iR] * a_csw[iAp1] * pb[iP1]
+ ra[iR] * a_asw[iAp1]
+ a_asw[iA] * pb[iP1];
iP1 = iP + yOffsetP + xOffsetP;
rap_cne[iAc] = a_cne[iA]
+ rb[iR] * a_cne[iAm1] * pb[iP1]
+ ra[iR] * a_cne[iAp1] * pa[iP1]
+ a_bne[iA] * pb[iP1]
+ a_ane[iA] * pa[iP1]
+ rb[iR] * a_ane[iAm1]
+ ra[iR] * a_bne[iAp1];
iP1 = iP + yOffsetP;
rap_cn[iAc] = a_cn[iA]
+ rb[iR] * a_cn[iAm1] * pb[iP1]
+ ra[iR] * a_cn[iAp1] * pa[iP1]
+ a_bn[iA] * pb[iP1]
+ a_an[iA] * pa[iP1]
+ rb[iR] * a_an[iAm1]
+ ra[iR] * a_bn[iAp1];
iP1 = iP + yOffsetP - xOffsetP;
rap_cnw[iAc] = a_cnw[iA]
+ rb[iR] * a_cnw[iAm1] * pb[iP1]
+ ra[iR] * a_cnw[iAp1] * pa[iP1]
+ a_bnw[iA] * pb[iP1]
+ a_anw[iA] * pa[iP1]
+ rb[iR] * a_anw[iAm1]
+ ra[iR] * a_bnw[iAp1];
iP1 = iP + xOffsetP;
rap_ce[iAc] = a_ce[iA]
+ rb[iR] * a_ce[iAm1] * pb[iP1]
+ ra[iR] * a_ce[iAp1] * pa[iP1]
+ a_be[iA] * pb[iP1]
+ a_ae[iA] * pa[iP1]
+ rb[iR] * a_ae[iAm1]
+ ra[iR] * a_be[iAp1];
}
hypre_BoxLoop4End(iP, iR, iA, iAc);
break;
} /* end switch statement */
} /* end ForBoxI */
return ierr;
}
/* -----------------------------------------------------------------
* Programmer(s): Daniel Reynolds and Ting Yan @ SMU
* Based on cvsAdvDiff_bnd.c and parallelized with OpenMP
* -----------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2021, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
* -----------------------------------------------------------------
* Example problem:
*
* The following is a simple example problem with a banded Jacobian,
* solved using CVODES.
* The problem is the semi-discrete form of the advection-diffusion
* equation in 2-D:
* du/dt = d^2 u / dx^2 + .5 du/dx + d^2 u / dy^2
* on the rectangle 0 <= x <= 2, 0 <= y <= 1, and the time
* interval 0 <= t <= 1. Homogeneous Dirichlet boundary conditions
* are posed, and the initial condition is
* u(x,y,t=0) = x(2-x)y(1-y)exp(5xy).
* The PDE is discretized on a uniform MX+2 by MY+2 grid with
* central differencing, and with boundary values eliminated,
* leaving an ODE system of size NEQ = MX*MY.
* This program solves the problem with the BDF method, Newton
* iteration with the BAND linear solver, and a user-supplied
* Jacobian routine.
* It uses scalar relative and absolute tolerances.
* Output is printed at t = .1, .2, ..., 1.
* Run statistics (optional outputs) are printed at the end.
*
* Optionally, we can set the number of threads from environment
* variable or command line. To check the current value for number
* of threads from environment:
* % echo $OMP_NUM_THREADS
*
* Execution:
*
* To use the default value or the number of threads from the
* environment value, run without arguments:
* % ./cvsAdvDiff_bnd_omp
* The environment variable can be over-ridden with a command line
* argument specifying the number of threads to use, e.g:
* % ./cvsAdvDiff_bnd_omp 5
* ----------------------------------------------------------------- */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/* Header files with a description of contents */
#include <cvodes/cvodes.h> /* prototypes for CVODE fcts., consts. */
#include <nvector/nvector_openmp.h> /* OpenMP N_Vector types, fcts., macros */
#include <sunmatrix/sunmatrix_band.h> /* access to band SUNMatrix */
#include <sunlinsol/sunlinsol_band.h> /* access to band SUNLinearSolver */
#include <sundials/sundials_types.h> /* definition of type realtype */
#ifdef _OPENMP
#include <omp.h>
#endif
/* Problem Constants */
#define XMAX RCONST(2.0) /* domain boundaries */
#define YMAX RCONST(1.0)
#define MX 10 /* mesh dimensions */
#define MY 5
#define NEQ MX*MY /* number of equations */
#define ATOL RCONST(1.0e-5) /* scalar absolute tolerance */
#define T0 RCONST(0.0) /* initial time */
#define T1 RCONST(0.1) /* first output time */
#define DTOUT RCONST(0.1) /* output time increment */
#define NOUT 10 /* number of output times */
#define ZERO RCONST(0.0)
#define HALF RCONST(0.5)
#define ONE RCONST(1.0)
#define TWO RCONST(2.0)
#define FIVE RCONST(5.0)
/* User-defined vector access macro IJth */
/* IJth is defined in order to isolate the translation from the
mathematical 2-dimensional structure of the dependent variable vector
to the underlying 1-dimensional storage.
IJth(vdata,i,j) references the element in the vdata array for
u at mesh point (i,j), where 1 <= i <= MX, 1 <= j <= MY.
The vdata array is obtained via the macro call vdata = NV_DATA_S(v),
where v is an N_Vector.
The variables are ordered by the y index j, then by the x index i. */
#define IJth(vdata,i,j) (vdata[(j-1) + (i-1)*MY])
/* Type : UserData (contains grid constants) */
typedef struct {
realtype dx, dy, hdcoef, hacoef, vdcoef; /* grid spacings; horizontal diffusion (1/dx^2), horizontal advection (.25/dx), vertical diffusion (1/dy^2) coefficients */
int nthreads; /* number of OpenMP threads used by f and Jac */
} *UserData;
/* Private Helper Functions */
static void SetIC(N_Vector u, UserData data);
static void PrintHeader(realtype reltol, realtype abstol, realtype umax);
static void PrintOutput(realtype t, realtype umax, long int nst);
static void PrintFinalStats(void *cvode_mem);
/* Private function to check function return values */
static int check_retval(void *returnvalue, char *funcname, int opt);
/* Functions Called by the Solver */
static int f(realtype t, N_Vector u, N_Vector udot, void *user_data);
static int Jac(realtype t, N_Vector u, N_Vector fu, SUNMatrix J,
void *user_data, N_Vector tmp1, N_Vector tmp2, N_Vector tmp3);
/*
*-------------------------------
* Main Program
*-------------------------------
*/
/* Main program: builds the MX*MY advection-diffusion ODE system, attaches
 * a user Jacobian and banded linear solver to CVODES (BDF/Newton), then
 * integrates to NOUT output times, printing max.norm(u) at each and run
 * statistics at the end.  An optional command-line argument overrides the
 * OpenMP thread count.  Returns 0 on success, 1 on a setup failure. */
int main(int argc, char *argv[])
{
  realtype dx, dy, reltol, abstol, t, tout, umax;
  N_Vector u;
  UserData data;
  SUNMatrix A;
  SUNLinearSolver LS;
  void *cvode_mem;
  int iout, retval;
  long int nst;
  int num_threads;

  u = NULL;
  data = NULL;
  A = NULL;
  LS = NULL;
  cvode_mem = NULL;

  /* Set the number of threads to use */
  num_threads = 1;                       /* default value */
#ifdef _OPENMP
  num_threads = omp_get_max_threads();   /* Overwrite with OMP_NUM_THREADS environment variable */
#endif
  if (argc > 1)                          /* overwrite with command line value, if supplied */
    num_threads = (int) strtol(argv[1], NULL, 0);
  /* Guard: strtol yields 0 for non-numeric input, and a non-positive
     thread count would be invalid for N_VNew_OpenMP and the OpenMP
     num_threads clauses -- fall back to a single thread. */
  if (num_threads <= 0) num_threads = 1;

  /* Create an OpenMP vector */
  u = N_VNew_OpenMP(NEQ, num_threads);   /* Allocate u vector */
  if(check_retval((void*)u, "N_VNew_OpenMP", 0)) return(1);

  reltol = ZERO;                         /* Set the tolerances */
  abstol = ATOL;

  data = (UserData) malloc(sizeof *data);  /* Allocate data memory */
  if(check_retval((void *)data, "malloc", 2)) return(1);
  dx = data->dx = XMAX/(MX+1);           /* Set grid coefficients in data */
  dy = data->dy = YMAX/(MY+1);
  data->hdcoef = ONE/(dx*dx);            /* horizontal diffusion: 1/dx^2 */
  data->hacoef = HALF/(TWO*dx);          /* horizontal advection: .25/dx */
  data->vdcoef = ONE/(dy*dy);            /* vertical diffusion: 1/dy^2 */
  data->nthreads = num_threads;

  SetIC(u, data);                        /* Initialize u vector */

  /* Call CVodeCreate to create the solver memory and specify the
   * Backward Differentiation Formula */
  cvode_mem = CVodeCreate(CV_BDF);
  if(check_retval((void *)cvode_mem, "CVodeCreate", 0)) return(1);

  /* Call CVodeInit to initialize the integrator memory and specify the
   * user's right hand side function in u'=f(t,u), the initial time T0, and
   * the initial dependent variable vector u. */
  retval = CVodeInit(cvode_mem, f, T0, u);
  if(check_retval(&retval, "CVodeInit", 1)) return(1);

  /* Call CVodeSStolerances to specify the scalar relative tolerance
   * and scalar absolute tolerance */
  retval = CVodeSStolerances(cvode_mem, reltol, abstol);
  if (check_retval(&retval, "CVodeSStolerances", 1)) return(1);

  /* Set the pointer to user-defined data */
  retval = CVodeSetUserData(cvode_mem, data);
  if(check_retval(&retval, "CVodeSetUserData", 1)) return(1);

  /* Create banded SUNMatrix for use in linear solves -- since this will be factored,
     set the storage bandwidth to be the sum of upper and lower bandwidths */
  A = SUNBandMatrix(NEQ, MY, MY);
  if(check_retval((void *)A, "SUNBandMatrix", 0)) return(1);

  /* Create banded SUNLinearSolver object for use by CVode */
  LS = SUNLinSol_Band(u, A);
  if(check_retval((void *)LS, "SUNLinSol_Band", 0)) return(1);

  /* Call CVodeSetLinearSolver to attach the matrix and linear solver to CVode */
  retval = CVodeSetLinearSolver(cvode_mem, LS, A);
  if(check_retval(&retval, "CVodeSetLinearSolver", 1)) return(1);

  /* Set the user-supplied Jacobian routine Jac */
  retval = CVodeSetJacFn(cvode_mem, Jac);
  if(check_retval(&retval, "CVodeSetJacFn", 1)) return(1);

  /* In loop over output points: call CVode, print results, test for errors */
  umax = N_VMaxNorm(u);
  PrintHeader(reltol, abstol, umax);
  for(iout=1, tout=T1; iout <= NOUT; iout++, tout += DTOUT) {
    retval = CVode(cvode_mem, tout, u, &t, CV_NORMAL);
    if(check_retval(&retval, "CVode", 1)) break;
    umax = N_VMaxNorm(u);
    retval = CVodeGetNumSteps(cvode_mem, &nst);
    check_retval(&retval, "CVodeGetNumSteps", 1);
    PrintOutput(t, umax, nst);
  }

  PrintFinalStats(cvode_mem);            /* Print some final statistics */
  printf("num_threads = %i\n\n", num_threads);

  N_VDestroy(u);                         /* Free the u vector */
  CVodeFree(&cvode_mem);                 /* Free the integrator memory */
  SUNLinSolFree(LS);                     /* Free the linear solver memory */
  SUNMatDestroy(A);                      /* Free the matrix memory */
  free(data);                            /* Free the user data */
  return(0);
}
/*
*-------------------------------
* Functions called by the solver
*-------------------------------
*/
/* f routine. Compute f(t,u). */
/* f routine: compute the semi-discrete RHS f(t,u), i.e.
 * udot = u_xx + .5 u_x + u_yy with homogeneous Dirichlet boundaries
 * (out-of-range neighbors treated as zero). */
static int f(realtype t, N_Vector u,N_Vector udot, void *user_data)
{
  realtype *udata, *dudata;
  realtype cdiffx, cadvx, cdiffy;
  realtype uc, ul, ur, ud, uu;
  sunindextype ix, jy;
  UserData data;

  udata  = NV_DATA_OMP(u);
  dudata = NV_DATA_OMP(udot);

  /* Extract needed constants from data */
  data   = (UserData) user_data;
  cdiffx = data->hdcoef;
  cadvx  = data->hacoef;
  cdiffy = data->vdcoef;

  ix = jy = 0;

  /* Loop over all grid points. */
#pragma omp parallel for default(shared) private(jy, ix, uc, ud, uu, ul, ur) num_threads(data->nthreads)
  for (jy = 1; jy <= MY; jy++) {
    for (ix = 1; ix <= MX; ix++) {
      /* u at (ix,jy) and its four neighbors (zero on the boundary) */
      uc = IJth(udata, ix, jy);
      ud = (jy == 1)  ? ZERO : IJth(udata, ix, jy-1);
      uu = (jy == MY) ? ZERO : IJth(udata, ix, jy+1);
      ul = (ix == 1)  ? ZERO : IJth(udata, ix-1, jy);
      ur = (ix == MX) ? ZERO : IJth(udata, ix+1, jy);
      /* horizontal diffusion + horizontal advection + vertical diffusion
         (summed in the same left-to-right order as the original) */
      IJth(dudata, ix, jy) = cdiffx*(ul - TWO*uc + ur)
                           + cadvx*(ur - ul)
                           + cdiffy*(uu - TWO*uc + ud);
    }
  }

  return(0);
}
/* Jacobian routine. Compute J(t,u). */
/* Jacobian routine: load the banded Jacobian J(t,u) one column at a time. */
static int Jac(realtype t, N_Vector u, N_Vector fu, SUNMatrix J,
               void *user_data, N_Vector tmp1, N_Vector tmp2, N_Vector tmp3)
{
  sunindextype ix, jy, col;
  realtype *colptr, cdx, cax, cdy;
  UserData data;

  /*
    The components of f = udot that depend on u(i,j) are
    f(i,j), f(i-1,j), f(i+1,j), f(i,j-1), f(i,j+1), with
      df(i,j)/du(i,j) = -2 (1/dx^2 + 1/dy^2)
      df(i-1,j)/du(i,j) = 1/dx^2 + .25/dx (if i > 1)
      df(i+1,j)/du(i,j) = 1/dx^2 - .25/dx (if i < MX)
      df(i,j-1)/du(i,j) = 1/dy^2 (if j > 1)
      df(i,j+1)/du(i,j) = 1/dy^2 (if j < MY)
  */

  ix = jy = col = 0;

  data = (UserData) user_data;
  cdx = data->hdcoef;
  cax = data->hacoef;
  cdy = data->vdcoef;

#pragma omp parallel for collapse(2) default(shared) private(ix, jy, col, colptr) num_threads(data->nthreads)
  for (jy = 1; jy <= MY; jy++) {
    for (ix = 1; ix <= MX; ix++) {
      /* column index of the unknown at mesh point (ix,jy) */
      col    = jy - 1 + (ix - 1)*MY;
      colptr = SUNBandMatrix_Column(J, col);
      /* diagonal entry, then the four off-diagonal couplings
         (skipped on the corresponding boundary) */
      SM_COLUMN_ELEMENT_B(colptr, col, col) = -TWO*(cdy + cdx);
      if (ix != 1)  SM_COLUMN_ELEMENT_B(colptr, col - MY, col) = cdx + cax;
      if (ix != MX) SM_COLUMN_ELEMENT_B(colptr, col + MY, col) = cdx - cax;
      if (jy != 1)  SM_COLUMN_ELEMENT_B(colptr, col - 1,  col) = cdy;
      if (jy != MY) SM_COLUMN_ELEMENT_B(colptr, col + 1,  col) = cdy;
    }
  }

  return(0);
}
/*
*-------------------------------
* Private helper functions
*-------------------------------
*/
/* Set initial conditions in u vector:
 * u(x,y,0) = x (XMAX - x) y (YMAX - y) exp(5 x y) at every interior point. */
static void SetIC(N_Vector u, UserData data)
{
  sunindextype col, row;
  realtype xloc, yloc;
  realtype *uarr;
  const realtype dx = data->dx;   /* mesh spacings from the user data */
  const realtype dy = data->dy;

  col = row = 0;

  /* Direct access to the data array of u */
  uarr = NV_DATA_OMP(u);

  /* Fill the initial profile, one mesh row per task */
#pragma omp parallel for default(shared) private(row, col, yloc, xloc)
  for (row = 1; row <= MY; row++) {
    yloc = row*dy;
    for (col = 1; col <= MX; col++) {
      xloc = col*dx;
      IJth(uarr, col, row) = xloc*(XMAX - xloc)*yloc*(YMAX - yloc)*exp(FIVE*xloc*yloc);
    }
  }
}
/* Print first lines of output: problem description, tolerances, and the
 * initial max-norm of u.  The extended-precision build needs the %Lg/%Le
 * formats; the double and default builds share identical format strings,
 * so they are folded into a single #else branch. */
static void PrintHeader(realtype reltol, realtype abstol, realtype umax)
{
  printf("\n2-D Advection-Diffusion Equation\n");
  printf("Mesh dimensions = %d X %d\n", MX, MY);
  printf("Total system size = %d\n", NEQ);
#if defined(SUNDIALS_EXTENDED_PRECISION)
  printf("Tolerance parameters: reltol = %Lg abstol = %Lg\n\n", reltol, abstol);
  printf("At t = %Lg max.norm(u) =%14.6Le \n", T0, umax);
#else
  printf("Tolerance parameters: reltol = %g abstol = %g\n\n", reltol, abstol);
  printf("At t = %g max.norm(u) =%14.6e \n", T0, umax);
#endif
}
/* Print one progress line: current time, max-norm of u, and the cumulative
 * step count.  The double and default precision branches were identical, so
 * a single #else covers both. */
static void PrintOutput(realtype t, realtype umax, long int nst)
{
#if defined(SUNDIALS_EXTENDED_PRECISION)
  printf("At t = %4.2Lf max.norm(u) =%14.6Le nst = %4ld\n", t, umax, nst);
#else
  printf("At t = %4.2f max.norm(u) =%14.6e nst = %4ld\n", t, umax, nst);
#endif
}
/* Query the CVODE memory block for its final solver counters and print a
 * two-line summary.  Each getter's return code is routed through
 * check_retval, which reports (but does not abort on) failures. */
static void PrintFinalStats(void *cvode_mem)
{
  long int n_steps, n_rhs, n_lsetups, n_etf, n_newton, n_nlcf, n_jac, n_lrhs;
  int flag;

  flag = CVodeGetNumSteps(cvode_mem, &n_steps);
  check_retval(&flag, "CVodeGetNumSteps", 1);
  flag = CVodeGetNumRhsEvals(cvode_mem, &n_rhs);
  check_retval(&flag, "CVodeGetNumRhsEvals", 1);
  flag = CVodeGetNumLinSolvSetups(cvode_mem, &n_lsetups);
  check_retval(&flag, "CVodeGetNumLinSolvSetups", 1);
  flag = CVodeGetNumErrTestFails(cvode_mem, &n_etf);
  check_retval(&flag, "CVodeGetNumErrTestFails", 1);
  flag = CVodeGetNumNonlinSolvIters(cvode_mem, &n_newton);
  check_retval(&flag, "CVodeGetNumNonlinSolvIters", 1);
  flag = CVodeGetNumNonlinSolvConvFails(cvode_mem, &n_nlcf);
  check_retval(&flag, "CVodeGetNumNonlinSolvConvFails", 1);
  flag = CVodeGetNumJacEvals(cvode_mem, &n_jac);
  check_retval(&flag, "CVodeGetNumJacEvals", 1);
  flag = CVodeGetNumLinRhsEvals(cvode_mem, &n_lrhs);
  check_retval(&flag, "CVodeGetNumLinRhsEvals", 1);

  printf("\nFinal Statistics:\n");
  printf("nst = %-6ld nfe = %-6ld nsetups = %-6ld nfeLS = %-6ld nje = %ld\n",
         n_steps, n_rhs, n_lsetups, n_lrhs, n_jac);
  printf("nni = %-6ld ncfn = %-6ld netf = %ld\n",
         n_newton, n_nlcf, n_etf);
}
/* Check a function's return value and report failures on stderr.
 *   opt == 0 : returnvalue is a pointer from a SUNDIALS allocator;
 *              NULL means the allocation failed.
 *   opt == 1 : returnvalue points to an integer status code;
 *              a negative value means the call failed.
 *   opt == 2 : returnvalue is a pointer from a plain allocator;
 *              NULL means out of memory.
 * Returns 1 on failure, 0 otherwise. */
static int check_retval(void *returnvalue, char *funcname, int opt)
{
  switch (opt)
  {
    case 0: /* SUNDIALS allocator: NULL pointer signals failure */
      if (returnvalue == NULL) {
        fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n",
                funcname);
        return(1);
      }
      break;

    case 1: { /* integer status code: negative signals failure */
      int *retval = (int *) returnvalue;
      if (*retval < 0) {
        fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed with retval = %d\n\n",
                funcname, *retval);
        return(1);
      }
      break;
    }

    case 2: /* memory allocation: NULL pointer signals failure */
      if (returnvalue == NULL) {
        fprintf(stderr, "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n",
                funcname);
        return(1);
      }
      break;
  }
  return(0);
}
|
displacement_lagrangemultiplier_contact_criteria.h | // KRATOS ___| | | |
// \___ \ __| __| | | __| __| | | __| _` | |
// | | | | | ( | | | | ( | |
// _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS
//
// License: BSD License
// license: StructuralMechanicsApplication/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_CONTACT_CRITERIA_H)
#define KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_CONTACT_CRITERIA_H
/* System includes */
/* External includes */
/* Project includes */
#include "utilities/table_stream_utility.h"
#include "solving_strategies/convergencecriterias/convergence_criteria.h"
#include "utilities/color_utilities.h"
#include "utilities/constraint_utilities.h"
namespace Kratos
{
///@addtogroup ContactStructuralMechanicsApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@name Kratos Classes
///@{
/**
 * @class DisplacementLagrangeMultiplierContactCriteria
 * @ingroup ContactStructuralMechanicsApplication
 * @brief Convergence criteria for contact problems
 * @details This class implements a convergence control based on nodal displacement,
 * nodal rotation (when rotation DoFs are present in the model part) and lagrange
 * multiplier values. The error is evaluated separately for each of them, and
 * relative and absolute tolerances for all of them must be specified.
 * @author Vicente Mataix Ferrandiz
 */
template< class TSparseSpace,
          class TDenseSpace >
class DisplacementLagrangeMultiplierContactCriteria
    : public ConvergenceCriteria< TSparseSpace, TDenseSpace >
{
public:
    ///@name Type Definitions
    ///@{

    /// Pointer definition of DisplacementLagrangeMultiplierContactCriteria
    KRATOS_CLASS_POINTER_DEFINITION( DisplacementLagrangeMultiplierContactCriteria );

    /// Local Flags
    KRATOS_DEFINE_LOCAL_FLAG( ENSURE_CONTACT );
    KRATOS_DEFINE_LOCAL_FLAG( PRINTING_OUTPUT );
    KRATOS_DEFINE_LOCAL_FLAG( TABLE_IS_INITIALIZED );
    KRATOS_DEFINE_LOCAL_FLAG( ROTATION_DOF_IS_CONSIDERED );

    /// The base class definition (and its subclasses)
    typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType;
    typedef typename BaseType::TDataType                     TDataType;
    typedef typename BaseType::DofsArrayType                 DofsArrayType;
    typedef typename BaseType::TSystemMatrixType             TSystemMatrixType;
    typedef typename BaseType::TSystemVectorType             TSystemVectorType;

    /// The sparse space used
    typedef TSparseSpace SparseSpaceType;

    /// The r_table stream definition TODO: Replace by logger
    typedef TableStreamUtility::Pointer TablePrinterPointerType;

    /// The index type definition
    typedef std::size_t IndexType;

    /// The key type definition
    typedef std::size_t KeyType;

    /// The epsilon tolerance definition, used to detect (numerically) zero norms
    static constexpr double Tolerance = std::numeric_limits<double>::epsilon();

    ///@}
    ///@name Life Cycle
    ///@{

    /**
     * @brief Default Constructor.
     * @param DispRatioTolerance Relative tolerance for displacement error
     * @param DispAbsTolerance Absolute tolerance for displacement error
     * @param RotRatioTolerance Relative tolerance for rotation error
     * @param RotAbsTolerance Absolute tolerance for rotation error
     * @param LMRatioTolerance Relative tolerance for lagrange multiplier error
     * @param LMAbsTolerance Absolute tolerance for lagrange multiplier error
     * @param EnsureContact To check if the contact is lost
     * @param PrintingOutput If the output is going to be printed in a txt file
     */
    explicit DisplacementLagrangeMultiplierContactCriteria(
        const TDataType DispRatioTolerance,
        const TDataType DispAbsTolerance,
        const TDataType RotRatioTolerance,
        const TDataType RotAbsTolerance,
        const TDataType LMRatioTolerance,
        const TDataType LMAbsTolerance,
        const bool EnsureContact = false,
        const bool PrintingOutput = false
        )
        : BaseType()
    {
        // Set local flags
        mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::ENSURE_CONTACT, EnsureContact);
        mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::PRINTING_OUTPUT, PrintingOutput);
        mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::TABLE_IS_INITIALIZED, false);
        mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::ROTATION_DOF_IS_CONSIDERED, false);

        // The displacement solution
        mDispRatioTolerance = DispRatioTolerance;
        mDispAbsTolerance = DispAbsTolerance;

        // The rotation solution
        mRotRatioTolerance = RotRatioTolerance;
        mRotAbsTolerance = RotAbsTolerance;

        // The contact solution
        mLMRatioTolerance = LMRatioTolerance;
        mLMAbsTolerance = LMAbsTolerance;
    }

    /**
     * @brief Default constructor (parameters)
     * @param ThisParameters The configuration parameters
     */
    explicit DisplacementLagrangeMultiplierContactCriteria( Parameters ThisParameters = Parameters(R"({})"))
        : BaseType()
    {
        // Validate and assign defaults
        ThisParameters = this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters());
        this->AssignSettings(ThisParameters);
    }

    /// Copy constructor (copies all criteria state, including the active DoFs cache)
    DisplacementLagrangeMultiplierContactCriteria( DisplacementLagrangeMultiplierContactCriteria const& rOther )
        :BaseType(rOther)
        ,mOptions(rOther.mOptions)
        ,mDispRatioTolerance(rOther.mDispRatioTolerance)
        ,mDispAbsTolerance(rOther.mDispAbsTolerance)
        ,mRotRatioTolerance(rOther.mRotRatioTolerance)
        ,mRotAbsTolerance(rOther.mRotAbsTolerance)
        ,mLMRatioTolerance(rOther.mLMRatioTolerance)
        ,mLMAbsTolerance(rOther.mLMAbsTolerance)
        ,mActiveDofs(rOther.mActiveDofs) // previously not copied; recomputed in InitializeSolutionStep, but copying keeps the copy usable immediately
    {
    }

    /// Destructor.
    ~DisplacementLagrangeMultiplierContactCriteria() override = default;

    ///@}
    ///@name Operators
    ///@{

    /**
     * @brief Compute relative and absolute error.
     * @param rModelPart Reference to the ModelPart containing the contact problem.
     * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
     * @param rA System matrix (unused)
     * @param rDx Vector of results (variations on nodal variables)
     * @param rb RHS vector (residual)
     * @return true if convergence is achieved, false otherwise
     */
    bool PostCriteria(
        ModelPart& rModelPart,
        DofsArrayType& rDofSet,
        const TSystemMatrixType& rA,
        const TSystemVectorType& rDx,
        const TSystemVectorType& rb
        ) override
    {
        if (SparseSpaceType::Size(rDx) != 0) { //if we are solving for something
            // Initialize the accumulated (squared) norms and DoF counters
            TDataType disp_solution_norm = 0.0, rot_solution_norm = 0.0, lm_solution_norm = 0.0, disp_increase_norm = 0.0, rot_increase_norm = 0.0, lm_increase_norm = 0.0;
            IndexType disp_dof_num(0),rot_dof_num(0),lm_dof_num(0);

            // First iterator
            const auto it_dof_begin = rDofSet.begin();

            // Auxiliar values
            std::size_t dof_id = 0;
            TDataType dof_value = 0.0, dof_incr = 0.0;

            // The number of active dofs
            const std::size_t number_active_dofs = rb.size();

            // Auxiliar displacement DoF check: without rotation DoFs every
            // non-LM DoF is treated as displacement; with rotation DoFs only
            // the DISPLACEMENT components are, the rest are assumed rotations
            const std::function<bool(const VariableData&)> check_without_rot =
            [](const VariableData& rCurrVar) -> bool {return true;};
            const std::function<bool(const VariableData&)> check_with_rot =
            [](const VariableData& rCurrVar) -> bool {return ((rCurrVar == DISPLACEMENT_X) || (rCurrVar == DISPLACEMENT_Y) || (rCurrVar == DISPLACEMENT_Z));};
            const auto* p_check_disp = (mOptions.Is(DisplacementLagrangeMultiplierContactCriteria::ROTATION_DOF_IS_CONSIDERED)) ? &check_with_rot : &check_without_rot;

            // Loop over Dofs, accumulating the squared norms per DoF family
            #pragma omp parallel for firstprivate(dof_id, dof_value, dof_incr) reduction(+:disp_solution_norm, rot_solution_norm, lm_solution_norm, disp_increase_norm, rot_increase_norm, lm_increase_norm, disp_dof_num, rot_dof_num, lm_dof_num)
            for (int i = 0; i < static_cast<int>(rDofSet.size()); i++) {
                auto it_dof = it_dof_begin + i;

                dof_id = it_dof->EquationId();

                // Check dof id is solved
                if (dof_id < number_active_dofs) {
                    if (mActiveDofs[dof_id] == 1) {
                        dof_value = it_dof->GetSolutionStepValue(0);
                        dof_incr = rDx[dof_id];

                        const auto& r_curr_var = it_dof->GetVariable();
                        if ((r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_X) || (r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_Y) || (r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_Z) || (r_curr_var == LAGRANGE_MULTIPLIER_CONTACT_PRESSURE)) {
                            lm_solution_norm += std::pow(dof_value, 2);
                            lm_increase_norm += std::pow(dof_incr, 2);
                            ++lm_dof_num;
                        } else if ((*p_check_disp)(r_curr_var)) {
                            disp_solution_norm += std::pow(dof_value, 2);
                            disp_increase_norm += std::pow(dof_incr, 2);
                            ++disp_dof_num;
                        } else { // We will assume is rotation dof
                            KRATOS_DEBUG_ERROR_IF_NOT((r_curr_var == ROTATION_X) || (r_curr_var == ROTATION_Y) || (r_curr_var == ROTATION_Z)) << "Variable must be a ROTATION and it is: " << r_curr_var.Name() << std::endl;
                            rot_solution_norm += std::pow(dof_value, 2);
                            rot_increase_norm += std::pow(dof_incr, 2);
                            ++rot_dof_num;
                        }
                    }
                }
            }

            // Guard against zero norms to avoid division by zero below
            if(disp_increase_norm < Tolerance) disp_increase_norm = 1.0;
            if(rot_increase_norm < Tolerance) rot_increase_norm = 1.0;
            if(lm_increase_norm < Tolerance) lm_increase_norm = 1.0;
            if(disp_solution_norm < Tolerance) disp_solution_norm = 1.0;
            if(rot_solution_norm < Tolerance) rot_solution_norm = 1.0; // BUGFIX: mirror the displacement guard, otherwise rot_ratio becomes inf on a zero rotation solution

            KRATOS_ERROR_IF(mOptions.Is(DisplacementLagrangeMultiplierContactCriteria::ENSURE_CONTACT) && lm_solution_norm < Tolerance) << "WARNING::CONTACT LOST::ARE YOU SURE YOU ARE SUPPOSED TO HAVE CONTACT?" << std::endl;

            const TDataType disp_ratio = std::sqrt(disp_increase_norm/disp_solution_norm);
            const TDataType rot_ratio = std::sqrt(rot_increase_norm/rot_solution_norm);
            const TDataType lm_ratio = lm_solution_norm > Tolerance ? std::sqrt(lm_increase_norm/lm_solution_norm) : 0.0;

            const TDataType disp_abs = std::sqrt(disp_increase_norm)/static_cast<TDataType>(disp_dof_num);
            const TDataType rot_abs = rot_dof_num > 0 ? std::sqrt(rot_increase_norm)/static_cast<TDataType>(rot_dof_num) : 0.0; // avoid 0-division when no rotation DoF is active
            const TDataType lm_abs = std::sqrt(lm_increase_norm)/static_cast<TDataType>(lm_dof_num);

            // The process info of the model part
            ProcessInfo& r_process_info = rModelPart.GetProcessInfo();

            // We print the results // TODO: Replace for the new log
            if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
                if (r_process_info.Has(TABLE_UTILITY)) {
                    std::cout.precision(4);
                    TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
                    auto& r_table = p_table->GetTable();
                    if (mOptions.Is(DisplacementLagrangeMultiplierContactCriteria::ROTATION_DOF_IS_CONSIDERED)) {
                        r_table << disp_ratio << mDispRatioTolerance << disp_abs << mDispAbsTolerance << rot_ratio << mRotRatioTolerance << rot_abs << mRotAbsTolerance << lm_ratio << mLMRatioTolerance << lm_abs << mLMAbsTolerance;
                    } else {
                        r_table << disp_ratio << mDispRatioTolerance << disp_abs << mDispAbsTolerance << lm_ratio << mLMRatioTolerance << lm_abs << mLMAbsTolerance;
                    }
                } else {
                    std::cout.precision(4);
                    if (mOptions.IsNot(DisplacementLagrangeMultiplierContactCriteria::PRINTING_OUTPUT)) {
                        KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << BOLDFONT("DoF CONVERGENCE CHECK") << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl;
                        KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << BOLDFONT("\tDISPLACEMENT: RATIO = ") << disp_ratio << BOLDFONT(" EXP.RATIO = ") << mDispRatioTolerance << BOLDFONT(" ABS = ") << disp_abs << BOLDFONT(" EXP.ABS = ") << mDispAbsTolerance << std::endl;
                        if (mOptions.Is(DisplacementLagrangeMultiplierContactCriteria::ROTATION_DOF_IS_CONSIDERED)) {
                            KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << BOLDFONT("\tROTATION: RATIO = ") << rot_ratio << BOLDFONT(" EXP.RATIO = ") << mRotRatioTolerance << BOLDFONT(" ABS = ") << rot_abs << BOLDFONT(" EXP.ABS = ") << mRotAbsTolerance << std::endl;
                        }
                        KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << BOLDFONT(" LAGRANGE MUL:\tRATIO = ") << lm_ratio << BOLDFONT(" EXP.RATIO = ") << mLMRatioTolerance << BOLDFONT(" ABS = ") << lm_abs << BOLDFONT(" EXP.ABS = ") << mLMAbsTolerance << std::endl;
                    } else {
                        KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << "DoF CONVERGENCE CHECK" << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl;
                        KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << "\tDISPLACEMENT: RATIO = " << disp_ratio << " EXP.RATIO = " << mDispRatioTolerance << " ABS = " << disp_abs << " EXP.ABS = " << mDispAbsTolerance << std::endl;
                        if (mOptions.Is(DisplacementLagrangeMultiplierContactCriteria::ROTATION_DOF_IS_CONSIDERED)) {
                            KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << "\tROTATION: RATIO = " << rot_ratio << " EXP.RATIO = " << mRotRatioTolerance << " ABS = " << rot_abs << " EXP.ABS = " << mRotAbsTolerance << std::endl;
                        }
                        KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << " LAGRANGE MUL:\tRATIO = " << lm_ratio << " EXP.RATIO = " << mLMRatioTolerance << " ABS = " << lm_abs << " EXP.ABS = " << mLMAbsTolerance << std::endl;
                    }
                }
            }

            // We check if converged
            const bool disp_converged = (disp_ratio <= mDispRatioTolerance || disp_abs <= mDispAbsTolerance);
            const bool rot_converged = (mOptions.Is(DisplacementLagrangeMultiplierContactCriteria::ROTATION_DOF_IS_CONSIDERED)) ? (rot_ratio <= mRotRatioTolerance || rot_abs <= mRotAbsTolerance) : true;
            const bool lm_converged = (mOptions.IsNot(DisplacementLagrangeMultiplierContactCriteria::ENSURE_CONTACT) && lm_solution_norm < Tolerance) ? true : (lm_ratio <= mLMRatioTolerance || lm_abs <= mLMAbsTolerance);

            if (disp_converged && rot_converged && lm_converged) {
                if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
                    if (r_process_info.Has(TABLE_UTILITY)) {
                        TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
                        auto& r_table = p_table->GetTable();
                        if (mOptions.IsNot(DisplacementLagrangeMultiplierContactCriteria::PRINTING_OUTPUT))
                            r_table << BOLDFONT(FGRN("       Achieved"));
                        else
                            r_table << "Achieved";
                    } else {
                        if (mOptions.IsNot(DisplacementLagrangeMultiplierContactCriteria::PRINTING_OUTPUT))
                            KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << BOLDFONT("\tDoF") << " convergence is " << BOLDFONT(FGRN("achieved")) << std::endl;
                        else
                            KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << "\tDoF convergence is achieved" << std::endl;
                    }
                }
                return true;
            } else {
                if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
                    if (r_process_info.Has(TABLE_UTILITY)) {
                        TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
                        auto& r_table = p_table->GetTable();
                        if (mOptions.IsNot(DisplacementLagrangeMultiplierContactCriteria::PRINTING_OUTPUT))
                            r_table << BOLDFONT(FRED("   Not achieved"));
                        else
                            r_table << "Not achieved";
                    } else {
                        if (mOptions.IsNot(DisplacementLagrangeMultiplierContactCriteria::PRINTING_OUTPUT))
                            KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << BOLDFONT("\tDoF") << " convergence is " << BOLDFONT(FRED(" not achieved")) << std::endl;
                        else
                            KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << "\tDoF convergence is not achieved" << std::endl;
                    }
                }
                return false;
            }
        }
        else // In this case all the displacements are imposed!
            return true;
    }

    /**
     * @brief This function initialize the convergence criteria
     * @param rModelPart Reference to the ModelPart containing the contact problem. (unused)
     */
    void Initialize( ModelPart& rModelPart ) override
    {
        // Initialize
        BaseType::mConvergenceCriteriaIsInitialized = true;

        // Check rotation dof
        // NOTE(review): ContactUtilities is used here but only constraint_utilities.h
        // is included at the top of this file - confirm contact_utilities.h is pulled
        // in transitively, otherwise add the include.
        mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::ROTATION_DOF_IS_CONSIDERED, ContactUtilities::CheckModelPartHasRotationDoF(rModelPart));

        // Initialize header of the output table (once per criteria instance)
        ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
        if (r_process_info.Has(TABLE_UTILITY) && mOptions.IsNot(DisplacementLagrangeMultiplierContactCriteria::TABLE_IS_INITIALIZED)) {
            TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
            auto& r_table = p_table->GetTable();
            r_table.AddColumn("DP RATIO", 10);
            r_table.AddColumn("EXP. RAT", 10);
            r_table.AddColumn("ABS", 10);
            r_table.AddColumn("EXP. ABS", 10);
            if (mOptions.Is(DisplacementLagrangeMultiplierContactCriteria::ROTATION_DOF_IS_CONSIDERED)) {
                r_table.AddColumn("RT RATIO", 10);
                r_table.AddColumn("EXP. RAT", 10);
                r_table.AddColumn("ABS", 10);
                r_table.AddColumn("EXP. ABS", 10);
            }
            r_table.AddColumn("LM RATIO", 10);
            r_table.AddColumn("EXP. RAT", 10);
            r_table.AddColumn("ABS", 10);
            r_table.AddColumn("EXP. ABS", 10);
            r_table.AddColumn("CONVERGENCE", 15);
            mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::TABLE_IS_INITIALIZED, true);
        }
    }

    /**
     * @brief This function initializes the solution step
     * @param rModelPart Reference to the ModelPart containing the contact problem.
     * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
     * @param rA System matrix (unused)
     * @param rDx Vector of results (variations on nodal variables)
     * @param rb RHS vector (residual)
     */
    void InitializeSolutionStep(
        ModelPart& rModelPart,
        DofsArrayType& rDofSet,
        const TSystemMatrixType& rA,
        const TSystemVectorType& rDx,
        const TSystemVectorType& rb
        ) override
    {
        // Filling mActiveDofs when MPC exist
        ConstraintUtilities::ComputeActiveDofs(rModelPart, mActiveDofs, rDofSet);
    }

    /**
     * @brief This method provides the defaults parameters to avoid conflicts between the different constructors
     * @return The default parameters
     */
    Parameters GetDefaultParameters() const override
    {
        Parameters default_parameters = Parameters(R"(
        {
            "name"                                     : "displacement_lagrangemultiplier_contact_criteria",
            "ensure_contact"                           : false,
            "print_convergence_criterion"              : false,
            "displacement_relative_tolerance"          : 1.0e-4,
            "displacement_absolute_tolerance"          : 1.0e-9,
            "rotation_relative_tolerance"              : 1.0e-4,
            "rotation_absolute_tolerance"              : 1.0e-9,
            "contact_displacement_relative_tolerance"  : 1.0e-4,
            "contact_displacement_absolute_tolerance"  : 1.0e-9
        })");

        // Getting base class default parameters
        const Parameters base_default_parameters = BaseType::GetDefaultParameters();
        default_parameters.RecursivelyAddMissingParameters(base_default_parameters);
        return default_parameters;
    }

    /**
     * @brief Returns the name of the class as used in the settings (snake_case format)
     * @return The name of the class
     */
    static std::string Name()
    {
        return "displacement_lagrangemultiplier_contact_criteria";
    }

    ///@}
    ///@name Operations
    ///@{

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Friends
    ///@{

protected:
    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{

    ///@}
    ///@name Protected Operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    /**
     * @brief This method assigns settings to member variables
     * @param ThisParameters Parameters that are assigned to the member variables
     */
    void AssignSettings(const Parameters ThisParameters) override
    {
        BaseType::AssignSettings(ThisParameters);

        // The displacement solution
        mDispRatioTolerance = ThisParameters["displacement_relative_tolerance"].GetDouble();
        mDispAbsTolerance = ThisParameters["displacement_absolute_tolerance"].GetDouble();

        // The rotation solution
        mRotRatioTolerance = ThisParameters["rotation_relative_tolerance"].GetDouble();
        mRotAbsTolerance = ThisParameters["rotation_absolute_tolerance"].GetDouble();

        // The contact solution
        mLMRatioTolerance = ThisParameters["contact_displacement_relative_tolerance"].GetDouble();
        mLMAbsTolerance = ThisParameters["contact_displacement_absolute_tolerance"].GetDouble();

        // Set local flags
        mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::ENSURE_CONTACT, ThisParameters["ensure_contact"].GetBool());
        mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::PRINTING_OUTPUT, ThisParameters["print_convergence_criterion"].GetBool());
        mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::TABLE_IS_INITIALIZED, false);
        mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::ROTATION_DOF_IS_CONSIDERED, false);
    }

    ///@}
    ///@name Protected Access
    ///@{

    ///@}
    ///@name Protected Inquiry
    ///@{

    ///@}
    ///@name Protected LifeCycle
    ///@{

    ///@}

private:
    ///@name Static Member Variables
    ///@{

    ///@}
    ///@name Member Variables
    ///@{

    Flags mOptions;                  /// Local flags

    TDataType mDispRatioTolerance;   /// The ratio threshold for the norm of the displacement
    TDataType mDispAbsTolerance;     /// The absolute value threshold for the norm of the displacement

    TDataType mRotRatioTolerance;    /// The ratio threshold for the norm of the rotation
    TDataType mRotAbsTolerance;      /// The absolute value threshold for the norm of the rotation

    TDataType mLMRatioTolerance;     /// The ratio threshold for the norm of the LM
    TDataType mLMAbsTolerance;       /// The absolute value threshold for the norm of the LM

    std::vector<int> mActiveDofs;    /// This vector contains the dofs that are active

    ///@}
    ///@name Private Operators
    ///@{

    ///@}
    ///@name Private Operations
    ///@{

    ///@}
    ///@name Private Access
    ///@{

    ///@}
    ///@}
    ///@name Serialization
    ///@{

    ///@name Private Inquiry
    ///@{

    ///@}
    ///@name Unaccessible methods
    ///@{

    ///@}
}; // Kratos DisplacementLagrangeMultiplierContactCriteria
///@name Local flags creation
///@{
/// Local Flags
// Out-of-class definitions of the criteria's local flags; each flag is backed
// by a distinct bit position (0-3) of the Kratos::Flags bitset.
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierContactCriteria<TSparseSpace, TDenseSpace>::ENSURE_CONTACT(Kratos::Flags::Create(0));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierContactCriteria<TSparseSpace, TDenseSpace>::PRINTING_OUTPUT(Kratos::Flags::Create(1));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierContactCriteria<TSparseSpace, TDenseSpace>::TABLE_IS_INITIALIZED(Kratos::Flags::Create(2));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierContactCriteria<TSparseSpace, TDenseSpace>::ROTATION_DOF_IS_CONSIDERED(Kratos::Flags::Create(3));
}
#endif /* KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_CONTACT_CRITERIA_H */
|
LG_CC_FastSV5.c | //------------------------------------------------------------------------------
// LG_CC_FastSV5: connected components
//------------------------------------------------------------------------------
// LAGraph, (c) 2021 by The LAGraph Contributors, All Rights Reserved.
// SPDX-License-Identifier: BSD-2-Clause
//------------------------------------------------------------------------------
// Code is based on the algorithm described in the following paper
// Zhang, Azad, Hu. FastSV: A Distributed-Memory Connected Component
// Algorithm with Fast Convergence (SIAM PP20)
// A subsequent update to the algorithm is here (which might not be reflected
// in this code):
//
// Yongzhe Zhang, Ariful Azad, Aydin Buluc: Parallel algorithms for finding
// connected components using linear algebra. J. Parallel Distributed Comput.
// 144: 14-27 (2020).
// Modified by Tim Davis, Texas A&M University
// The input matrix A must be symmetric. Self-edges (diagonal entries) are
// OK, and are ignored. The values and type of A are ignored; just its
// structure is accessed.
// The matrix A must have dimension 2^32 or less.
// todo: Need a 64-bit version of this method.
// todo: this function is not thread-safe, since it exports G->A and then
// reimports it back. G->A is unchanged when the function returns, but during
// execution G->A is invalid.
#define LAGraph_FREE_ALL ;
#include "LG_internal.h"
#if !LG_VANILLA
#if (! LG_SUITESPARSE )
#error "SuiteSparse:GraphBLAS v6.0.0 or later required"
#endif
//------------------------------------------------------------------------------
// hash functions: todo describe me
//------------------------------------------------------------------------------
// hash table size must be a power of 2
#define HASH_SIZE 1024
// number of samples to insert into the hash table
// todo: this seems to be a lot of entries for a HASH_SIZE of 1024.
// There could be lots of collisions.
#define HASH_SAMPLES 864
#define HASH(x) (((x << 4) + x) & (HASH_SIZE-1))
#define NEXT(x) ((x + 23) & (HASH_SIZE-1))
//------------------------------------------------------------------------------
// ht_init: reset the hash table
//------------------------------------------------------------------------------

// Marks every slot of the table as empty (ht_key [h] = -1) and resets every
// occurrence counter (ht_val [h] = 0).
// todo: the name "ht_val" is confusing.  It is not a value, but a count of
// the number of times the value x = ht_key [h] has been inserted into the
// hth position in the hash table.  It should be renamed ht_cnt.

static inline void ht_init
(
    int32_t *ht_key,
    int32_t *ht_val
)
{
    for (int32_t h = 0 ; h < HASH_SIZE ; h++)
    {
        ht_key [h] = -1 ;   // slot empty
        ht_val [h] = 0 ;    // nothing counted yet
    }
}
//------------------------------------------------------------------------------
// ht_sample: tally random samples of V32 in the hash table
//------------------------------------------------------------------------------

// Draws `samples` entries of V32 uniformly at random (with replacement) and
// counts each one in the open-addressed hash table: the key is stored in
// ht_key and its occurrence count incremented in ht_val.
// todo: this is a bad variable name (V32)

static inline void ht_sample
(
    uint32_t *V32,      // array of size n
    int32_t n,
    int32_t samples,    // number of samples to take from V32
    int32_t *ht_key,
    int32_t *ht_val,
    uint64_t *seed
)
{
    for (int32_t s = 0 ; s < samples ; s++)
    {
        // pick an entry of V32 at random
        const int32_t x = V32 [LAGraph_Random60 (seed) % n] ;
        // probe until x's slot or an empty slot is found
        // todo: make this probe loop a static inline function (see also below)
        int32_t h ;
        for (h = HASH (x) ; ht_key [h] != -1 && ht_key [h] != x ; h = NEXT (h))
        {
            // keep probing
        }
        ht_key [h] = x ;    // claim (or re-claim) the slot for x
        ht_val [h]++ ;      // one more occurrence of x
    }
}
//------------------------------------------------------------------------------
// ht_most_frequent: key with the largest count in the hash table
//------------------------------------------------------------------------------

// Scans the whole table and returns the key whose counter is largest, or -1
// when the table is empty.
// NOTE(review): callers appear to assume a valid key is returned; the -1
// (empty-table) case breaks them -- confirm it is handled upstream.

static inline int32_t ht_most_frequent
(
    int32_t *ht_key,
    int32_t *ht_val
)
{
    int32_t best_key = -1 ;     // key seen with the highest count so far
    int32_t best_cnt = 0 ;      // max (ht_val [0:HASH_SIZE-1]) so far
    for (int32_t h = 0 ; h < HASH_SIZE ; h++)
    {
        if (ht_val [h] > best_cnt)
        {
            best_key = ht_key [h] ;
            best_cnt = ht_val [h] ;
        }
    }
    return (best_key) ;         // most frequent key
}
//------------------------------------------------------------------------------
// Reduce_assign32: w (index) += s, using MIN as the "+=" accum operator
//------------------------------------------------------------------------------
// mask = NULL, accumulator = GrB_MIN_UINT32, descriptor = NULL.
// Duplicates are summed with the accumulator, which differs from how
// GrB_assign works. GrB_assign states that the presence of duplicates results
// in undefined behavior. GrB_assign in SuiteSparse:GraphBLAS follows the
// MATLAB rule, which discards all but the first of the duplicates.
// todo: add this to GraphBLAS as a variant of GrB_assign, either as
// GxB_assign_accum (or another name), or as a GxB_* descriptor setting.
// Reduce_assign32: computes w (index) += s with MIN as the accumulator,
// where index may contain duplicates; duplicates are combined with MIN
// (unlike GrB_assign, which treats duplicates as undefined behavior).
// Both w and s must be full vectors of size n. s is not modified; it is
// exported only to get direct access to its values. Returns 0 on success.
static inline int Reduce_assign32
(
    GrB_Vector *w_handle,   // vector of size n, all entries present
    GrB_Vector *s_handle,   // vector of size n, all entries present
    uint32_t *index,        // array of size n, can have duplicates
    GrB_Index n,
    int nthreads,
    int32_t *ht_key,        // hash table
    int32_t *ht_val,        // hash table (count of # of entries)
    uint64_t *seed,         // random
    char *msg
)
{
    GrB_Type w_type, s_type ;
    GrB_Index w_n, s_n, w_nvals, s_nvals, *w_i, *s_i, w_size, s_size ;
    uint32_t *w_x, *s_x ;
    bool s_iso = false ;

    //--------------------------------------------------------------------------
    // export w and s
    //--------------------------------------------------------------------------

    // export the GrB_Vectors w and s as full arrays, to get direct access to
    // their contents. Note that this would fail if w or s are not full, with
    // all entries present.
    GrB_TRY (GxB_Vector_export_Full (w_handle, &w_type, &w_n, (void **) &w_x,
        &w_size, NULL, NULL)) ;
    GrB_TRY (GxB_Vector_export_Full (s_handle, &s_type, &s_n, (void **) &s_x,
        &s_size, &s_iso, NULL)) ;

    if (nthreads >= 4)
    {
        // Parallel path: the hash table holds a sample of frequently seen
        // index values; updates to those "hot" targets are accumulated in a
        // per-thread workspace (buf) and merged after the parallel region.

        // allocate a buf array for each thread, of size HASH_SIZE
        uint32_t *mem = LAGraph_Malloc (nthreads*HASH_SIZE, sizeof (uint32_t)) ;
        // todo: check out-of-memory condition here

        // todo why is hashing needed here?  hashing is slow for what needs
        // to be computed here.  GraphBLAS has fast MIN atomic monoids that
        // do not require hashing.
        ht_init (ht_key, ht_val) ;
        ht_sample (index, n, HASH_SAMPLES, ht_key, ht_val, seed) ;

        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (int tid = 0 ; tid < nthreads ; tid++)
        {
            // get the thread-specific buf array of size HASH_SIZE
            // todo: buf is a bad variable name; it's not a "buffer",
            // but a local workspace to compute the local version of w_x.
            uint32_t *buf = mem + tid * HASH_SIZE ;

            // copy the values from the global hash table into buf
            for (int32_t h = 0 ; h < HASH_SIZE ; h++)
            {
                if (ht_key [h] != -1)
                {
                    buf [h] = w_x [ht_key [h]] ;
                }
            }

            // this thread works on index [kstart:kend]
            int32_t kstart = (n * tid + nthreads - 1) / nthreads ;
            int32_t kend = (n * tid + n + nthreads - 1) / nthreads ;
            for (int32_t k = kstart ; k < kend ; k++)
            {
                uint32_t i = index [k] ;
                // probe the hash table for i (linear probing)
                // todo: make this loop a static inline function
                int32_t h = HASH (i) ;
                while (ht_key [h] != -1 && ht_key [h] != i)
                {
                    h = NEXT (h) ;
                }
                if (ht_key [h] == -1)
                {
                    // i is not a sampled "hot" target: update w_x directly.
                    // todo is this a race condition?
                    w_x [i] = LAGraph_MIN (w_x [i], s_x [s_iso?0:k]) ;
                }
                else
                {
                    // i is in the hash table: accumulate thread-locally
                    buf [h] = LAGraph_MIN (buf [h], s_x [s_iso?0:k]) ;
                }
            }
        }

        // combine intermediate results from each thread
        for (int32_t h = 0 ; h < HASH_SIZE ; h++)
        {
            int32_t i = ht_key [h] ;
            if (i != -1)
            {
                for (int32_t tid = 0 ; tid < nthreads ; tid++)
                {
                    w_x [i] = LAGraph_MIN (w_x [i], mem [tid * HASH_SIZE + h]) ;
                }
            }
        }
        LAGraph_Free ((void **) &mem) ;
    }
    else
    {
        // sequential version
        for (GrB_Index k = 0 ; k < n ; k++)
        {
            uint32_t i = index [k] ;
            w_x [i] = LAGraph_MIN (w_x [i], s_x [s_iso?0:k]) ;
        }
    }

    //--------------------------------------------------------------------------
    // reimport w and s back into GrB_Vectors, and return result
    //--------------------------------------------------------------------------

    // s is unchanged.  It was exported only to compute w (index) += s
    GrB_TRY (GxB_Vector_import_Full (w_handle, w_type, w_n, (void **) &w_x,
        w_size, false, NULL)) ;
    GrB_TRY (GxB_Vector_import_Full (s_handle, s_type, s_n, (void **) &s_x,
        s_size, s_iso, NULL)) ;
    return (0) ;
}
//------------------------------------------------------------------------------
// LG_CC_FastSV5
//------------------------------------------------------------------------------
// The output of LG_CC_FastSV5 is a vector component, where
// component(i)=s if node i is in the connected compononent whose
// representative node is node s. If s is a representative, then
// component(s)=s. The number of connected components in the graph G is the
// number of representatives.
// Workspace teardown used by LG_CC_FastSV5 on every exit path: frees the
// index/typecast arrays, the hash table, and all temporary vectors.
#undef LAGraph_FREE_ALL
#define LAGraph_FREE_ALL            \
{                                   \
    LAGraph_Free ((void **) &I) ;   \
    LAGraph_Free ((void **) &V32) ; \
    LAGraph_Free ((void **) &ht_key) ; \
    LAGraph_Free ((void **) &ht_val) ; \
    /* todo why is T not freed?? */ \
    GrB_free (&f) ;                 \
    GrB_free (&gp) ;                \
    GrB_free (&mngp) ;              \
    GrB_free (&gp_new) ;            \
    GrB_free (&mod) ;               \
}
// NOTE(review): this #endif appears to close an #if/#ifndef opened earlier,
// outside this view — confirm the pairing is intentional.
#endif
// LG_CC_FastSV5: connected components via the FastSV algorithm, using
// SuiteSparse:GraphBLAS GxB extensions.  On success, component(i)=s where s
// is the representative node of node i's component, and the function
// returns 0.  G->A must be structurally symmetric; G->A may be temporarily
// exported/reimported (restored before return).
int LG_CC_FastSV5         // SuiteSparse:GraphBLAS method, with GxB extensions
(
    // output
    GrB_Vector *component,  // component(i)=s if node is in the component s
    // inputs
    LAGraph_Graph G,        // input graph, G->A can change
    char *msg
)
{
#if LG_VANILLA
    LG_CHECK (0, -1, "SuiteSparse required for this method") ;
#else

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    LG_CLEAR_MSG ;
    uint32_t *V32 = NULL ;
    int32_t *ht_key = NULL, *ht_val = NULL ;
    GrB_Index n, nnz, *I = NULL ;
    GrB_Vector f = NULL, gp_new = NULL, mngp = NULL, mod = NULL, gp = NULL ;
    GrB_Matrix T = NULL ;

    LG_CHECK (LAGraph_CheckGraph (G, msg), -1, "graph is invalid") ;
    LG_CHECK (component == NULL, -1, "component parameter is NULL") ;

    if (G->kind == LAGRAPH_ADJACENCY_UNDIRECTED ||
        (G->kind == LAGRAPH_ADJACENCY_DIRECTED &&
        G->A_structure_is_symmetric == LAGRAPH_TRUE))
    {
        // A must be symmetric
        ;
    }
    else
    {
        // A must not be unsymmetric
        LG_CHECK (false, -1, "input must be symmetric") ;
    }

    GrB_Matrix S = G->A ;
    GrB_TRY (GrB_Matrix_nrows (&n, S)) ;
    GrB_TRY (GrB_Matrix_nvals (&nnz, S)) ;
    // node ids are held in uint32_t (V32), so n must fit in 32 bits
    LG_CHECK (n > UINT32_MAX, -1, "problem too large (fixme)") ;

    #define FASTSV_SAMPLES 4

    // use the sampling phase only when the graph is dense enough for the
    // FASTSV_SAMPLES-per-row subgraph to be a meaningful reduction
    bool sampling = (n * FASTSV_SAMPLES * 2 < nnz) ;

    // random number seed
    uint64_t seed = n ;

    //--------------------------------------------------------------------------
    // initializations
    //--------------------------------------------------------------------------

    // determine # of threads to use for Reduce_assign
    int nthreads ;
    LAGraph_TRY (LAGraph_GetNumThreads (&nthreads, NULL)) ;
    nthreads = LAGraph_MIN (nthreads, n / 16) ;
    nthreads = LAGraph_MAX (nthreads, 1) ;

    // # of threads to use for typecast
    int nthreads2 = n / (64*1024) ;
    nthreads2 = LAGraph_MIN (nthreads2, nthreads) ;
    nthreads2 = LAGraph_MAX (nthreads2, 1) ;

    // vectors: f is the parent vector, gp the grandparent, mngp the minimum
    // neighbor grandparent, mod the per-node change flag
    GrB_TRY (GrB_Vector_new (&f, GrB_UINT32, n)) ;
    GrB_TRY (GrB_Vector_new (&gp_new, GrB_UINT32, n)) ;
    GrB_TRY (GrB_Vector_new (&mod, GrB_BOOL, n)) ;

    // temporary arrays
    I = LAGraph_Malloc (n, sizeof (GrB_Index)) ;
    V32 = LAGraph_Malloc (n, sizeof (uint32_t)) ;
    // todo: check out-of-memory condition

    // prepare vectors: f = gp = mngp = 0:n-1 (every node its own parent)
    #pragma omp parallel for num_threads(nthreads2) schedule(static)
    for (GrB_Index i = 0 ; i < n ; i++)
    {
        I [i] = i ;
        V32 [i] = (uint32_t) i ;
    }
    GrB_TRY (GrB_Vector_build (f, I, V32, n, GrB_PLUS_UINT32)) ;
    GrB_TRY (GrB_Vector_dup (&gp, f)) ;
    GrB_TRY (GrB_Vector_dup (&mngp, f)) ;

    // allocate the hash table
    ht_key = LAGraph_Malloc (HASH_SIZE, sizeof (int32_t)) ;
    ht_val = LAGraph_Malloc (HASH_SIZE, sizeof (int32_t)) ;
    LG_CHECK (ht_key == NULL || ht_val == NULL, -1, "out of memory") ;

    //--------------------------------------------------------------------------
    // sample phase
    //--------------------------------------------------------------------------

    if (sampling)
    {

        //----------------------------------------------------------------------
        // export S = G->A in CSR format
        //----------------------------------------------------------------------

        // S is not modified.  It is only exported so that its contents can be
        // read by the parallel loops below.
        GrB_Type type ;
        GrB_Index nrows, ncols, nvals ;
        size_t typesize ;
        int64_t nonempty ;
        GrB_Index *Sp, *Sj ;
        void *Sx ;
        bool S_jumbled = false ;
        GrB_Index Sp_size, Sj_size, Sx_size ;
        bool S_iso = false ;
        GrB_TRY (GrB_Matrix_nvals (&nvals, S)) ;
        GrB_TRY (GxB_Matrix_export_CSR (&S, &type, &nrows, &ncols, &Sp, &Sj,
            &Sx, &Sp_size, &Sj_size, &Sx_size,
            &S_iso, &S_jumbled, NULL)) ;
        GrB_TRY (GxB_Type_size (&typesize, type)) ;
        G->A = NULL ;

        //----------------------------------------------------------------------
        // allocate space to construct T
        //----------------------------------------------------------------------

        GrB_Index Tp_len = nrows+1, Tp_size = Tp_len*sizeof(GrB_Index);
        GrB_Index Tj_len = nvals, Tj_size = Tj_len*sizeof(GrB_Index);
        GrB_Index Tx_len = nvals ;
        GrB_Index *Tp = LAGraph_Malloc (Tp_len, sizeof (GrB_Index)) ;
        GrB_Index *Tj = LAGraph_Malloc (Tj_len, sizeof (GrB_Index)) ;
        GrB_Index Tx_size = typesize ;
        void *Tx = LAGraph_Calloc (1, typesize) ; // T is iso
        // todo check out-of-memory conditions

        //----------------------------------------------------------------------
        // allocate workspace
        //----------------------------------------------------------------------

        int32_t *range = LAGraph_Malloc (nthreads + 1, sizeof (int32_t)) ;
        GrB_Index *count = LAGraph_Malloc (nthreads + 1, sizeof (GrB_Index)) ;
        // todo check out-of-memory conditions
        memset (count, 0, sizeof (GrB_Index) * (nthreads + 1)) ;

        //----------------------------------------------------------------------
        // define parallel tasks to construct T
        //----------------------------------------------------------------------

        // thread tid works on rows range[tid]:range[tid+1]-1 of S and T
        for (int tid = 0 ; tid <= nthreads ; tid++)
        {
            range [tid] = (n * tid + nthreads - 1) / nthreads ;
        }

        //----------------------------------------------------------------------
        // determine the number entries to be constructed in T for each thread
        //----------------------------------------------------------------------

        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (int tid = 0 ; tid < nthreads ; tid++)
        {
            for (int32_t i = range [tid] ; i < range [tid+1] ; i++)
            {
                int32_t deg = Sp [i + 1] - Sp [i] ;
                count [tid + 1] += LAGraph_MIN (FASTSV_SAMPLES, deg) ;
            }
        }

        //----------------------------------------------------------------------
        // count = cumsum (count)
        //----------------------------------------------------------------------

        for (int tid = 0 ; tid < nthreads ; tid++)
        {
            count [tid + 1] += count [tid] ;
        }

        //----------------------------------------------------------------------
        // construct T
        //----------------------------------------------------------------------

        // T (i,:) consists of the first FASTSV_SAMPLES of S (i,:).
        // todo: this could be done by GxB_Select, using a new operator.  Need
        // to define a set of GxB_SelectOp operators that would allow for this.
        // Note that Tx is not modified.  Only Tp and Tj are constructed.

        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (int tid = 0 ; tid < nthreads ; tid++)
        {
            GrB_Index p = count [tid] ;
            Tp [range [tid]] = p ;
            for (int32_t i = range [tid] ; i < range [tid+1] ; i++)
            {
                // construct T (i,:) from the first entries in S (i,:)
                for (int32_t j = 0 ;
                    j < FASTSV_SAMPLES && Sp [i] + j < Sp [i + 1] ; j++)
                {
                    Tj [p++] = Sj [Sp [i] + j] ;
                }
                Tp [i + 1] = p ;
            }
        }

        //----------------------------------------------------------------------
        // import the result into the GrB_Matrix T
        //----------------------------------------------------------------------

        // Note that Tx is unmodified.
        // in SuiteSparse:GraphBLAS v5, sizes are in bytes, not entries
        GrB_Index Tp_siz = Tp_size ;
        GrB_Index Tj_siz = Tj_size ;
        GrB_Index Tx_siz = Tx_size ;
        GrB_Index t_nvals = Tp [nrows] ;
        GrB_TRY (GxB_Matrix_import_CSR (&T, type, nrows, ncols,
            &Tp, &Tj, &Tx, Tp_siz, Tj_siz, Tx_siz,
            true, // T is iso
            S_jumbled, NULL)) ;

        //----------------------------------------------------------------------
        // find the connected components of T
        //----------------------------------------------------------------------

        // todo: this is nearly identical to the final phase below.
        // Make this a function
        bool change = true, is_first = true ;
        while (change)
        {
            // hooking & shortcutting
            GrB_TRY (GrB_mxv (mngp, NULL, GrB_MIN_UINT32,
                GrB_MIN_SECOND_SEMIRING_UINT32, T, gp, NULL)) ;
            if (!is_first)
            {
                LAGraph_TRY (Reduce_assign32 (&f, &mngp, V32, n, nthreads,
                    ht_key, ht_val, &seed, msg)) ;
            }
            GrB_TRY (GrB_eWiseAdd (f, NULL, GrB_MIN_UINT32, GrB_MIN_UINT32,
                mngp, gp, NULL)) ;

            // calculate grandparent
            // fixme: NULL parameter is SS:GrB extension
            GrB_TRY (GrB_Vector_extractTuples (NULL, V32, &n, f)) ; // fixme
            #pragma omp parallel for num_threads(nthreads2) schedule(static)
            for (uint32_t i = 0 ; i < n ; i++)
            {
                I [i] = (GrB_Index) V32 [i] ;
            }
            GrB_TRY (GrB_extract (gp_new, NULL, NULL, f, I, n, NULL)) ;
            // todo: GrB_Vector_extract should have a variant where the index
            // list is not given by an array I, but as a GrB_Vector of type
            // GrB_UINT64 (or which can be typecast to GrB_UINT64).  This is a
            // common issue that arises in other algorithms as well.
            // Likewise GrB_Matrix_extract, and all forms of GrB_assign.

            // check termination
            GrB_TRY (GrB_eWiseMult (mod, NULL, NULL, GrB_NE_UINT32, gp_new,
                gp, NULL)) ;
            GrB_TRY (GrB_reduce (&change, NULL, GrB_LOR_MONOID_BOOL, mod,
                NULL)) ;

            // swap gp and gp_new
            GrB_Vector t = gp ; gp = gp_new ; gp_new = t ;
            is_first = false ;
        }

        //----------------------------------------------------------------------
        // todo: describe me
        //----------------------------------------------------------------------

        // NOTE(review): this phase appears to rebuild T so that rows whose
        // current parent (V32) equals the most frequent parent `key` are
        // emptied, and edges into that giant component are redirected to a
        // single entry T(i,key) — presumably to shrink the main loop's work.
        // Confirm against the published FastSV paper.
        ht_init (ht_key, ht_val) ;
        ht_sample (V32, n, HASH_SAMPLES, ht_key, ht_val, &seed) ;
        int32_t key = ht_most_frequent (ht_key, ht_val) ;
        // todo: what if key is returned as -1?  Then T below is invalid.

        int64_t t_nonempty = -1 ;
        bool T_jumbled = false, T_iso = true ;

        // export T
        GrB_TRY (GxB_Matrix_export_CSR (&T, &type, &nrows, &ncols, &Tp, &Tj,
            &Tx, &Tp_siz, &Tj_siz, &Tx_siz,
            &T_iso, &T_jumbled, NULL)) ;

        // todo what is this phase doing?  It is constructing a matrix T that
        // depends only on S, key, and V32.  T contains a subset of the entries
        // in S, except that T (i,:) is empty if

        // The prior content of T is ignored; it is exported from the earlier
        // phase, only to reuse the allocated space for T.  However, T_jumbled
        // is preserved from the prior matrix T, which doesn't make sense.

        // This parallel loop is badly load balanced.  Each thread operates on
        // the same number of rows of S, regardless of how many entries appear
        // in each set of rows.  It uses one thread per task, statically
        // scheduled.
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (int tid = 0 ; tid < nthreads ; tid++)
        {
            GrB_Index ptr = Sp [range [tid]] ;
            // thread tid scans S (range [tid]:range [tid+1]-1,:),
            // and constructs T(i,:) for all rows in this range.
            for (int32_t i = range [tid] ; i < range [tid+1] ; i++)
            {
                int32_t pv = V32 [i] ; // what is pv?
                Tp [i] = ptr ;          // start the construction of T(i,:)
                // T(i,:) is empty if pv == key
                if (pv != key)
                {
                    // scan S(i,:)
                    for (GrB_Index p = Sp [i] ; p < Sp [i+1] ; p++)
                    {
                        // get S(i,j)
                        int32_t j = Sj [p] ;
                        if (V32 [j] != key)
                        {
                            // add the entry T(i,j) to T, but skip it if
                            // V32 [j] is equal to key
                            Tj [ptr++] = j ;
                        }
                    }
                    // add the entry T(i,key) if there is room for it in T(i,:)
                    if (ptr - Tp [i] < Sp [i+1] - Sp [i])
                    {
                        Tj [ptr++] = key ;
                    }
                }
            }
            // count the number of entries inserted into T by this thread?
            count [tid] = ptr - Tp [range [tid]] ;
        }

        // Compact empty space out of Tj not filled in from the above phase.
        // This is a lot of work and should be done in parallel.
        GrB_Index offset = 0 ;
        for (int tid = 0 ; tid < nthreads ; tid++)
        {
            memcpy (Tj + offset, Tj + Tp [range [tid]],
                sizeof (GrB_Index) * count [tid]) ;
            offset += count [tid] ;
            count [tid] = offset - count [tid] ;
        }

        // Compact empty space out of Tp
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (int tid = 0 ; tid < nthreads ; tid++)
        {
            GrB_Index ptr = Tp [range [tid]] ;
            for (int32_t i = range [tid] ; i < range [tid+1] ; i++)
            {
                Tp [i] -= ptr - count [tid] ;
            }
        }

        // finalize T
        Tp [n] = offset ;

        // free workspace
        LAGraph_Free ((void **) &count) ;
        LAGraph_Free ((void **) &range) ;

        // import S (unchanged since last export)
        GrB_TRY (GxB_Matrix_import_CSR (&S, type, nrows, ncols,
            &Sp, &Sj, &Sx, Sp_size, Sj_size, Sx_size,
            S_iso, S_jumbled, NULL)) ;

        // import T for the final phase
        GrB_TRY (GxB_Matrix_import_CSR (&T, type, nrows, ncols,
            &Tp, &Tj, &Tx, Tp_siz, Tj_siz, Tx_siz,
            T_iso, T_jumbled, NULL)) ;

        // restore G->A
        G->A = S ;
    }
    else
    {
        // no sampling; the final phase operates on the whole graph
        T = S ;
    }

    //--------------------------------------------------------------------------
    // final phase
    //--------------------------------------------------------------------------

    GrB_TRY (GrB_Matrix_nvals (&nnz, T)) ;
    bool change = true ;
    while (change && nnz > 0)
    {
        // hooking & shortcutting
        GrB_TRY (GrB_mxv (mngp, NULL, GrB_MIN_UINT32,
            GrB_MIN_SECOND_SEMIRING_UINT32, T, gp, NULL)) ;
        GrB_TRY (Reduce_assign32 (&f, &mngp, V32, n, nthreads, ht_key,
            ht_val, &seed, msg)) ;
        GrB_TRY (GrB_eWiseAdd (f, NULL, GrB_MIN_UINT32, GrB_MIN_UINT32,
            mngp, gp, NULL)) ;

        // calculate grandparent
        // fixme: NULL parameter is SS:GrB extension
        GrB_TRY (GrB_Vector_extractTuples (NULL, V32, &n, f)) ; // fixme
        #pragma omp parallel for num_threads(nthreads2) schedule(static)
        for (uint32_t k = 0 ; k < n ; k++)
        {
            I [k] = (GrB_Index) V32 [k] ;
        }
        GrB_TRY (GrB_extract (gp_new, NULL, NULL, f, I, n, NULL)) ;

        // check termination
        GrB_TRY (GrB_eWiseMult (mod, NULL, NULL, GrB_NE_UINT32, gp_new, gp,
            NULL)) ;
        GrB_TRY (GrB_reduce (&change, NULL, GrB_LOR_MONOID_BOOL, mod, NULL)) ;

        // swap gp and gp_new
        GrB_Vector t = gp ; gp = gp_new ; gp_new = t ;
    }

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    (*component) = f ;
    f = NULL ;
    if (sampling)
    {
        GrB_free (&T) ;
    }
    LAGraph_FREE_ALL ;
    return (0) ;
#endif
}
|
util.h | /*******************************************************************************
* Copyright 2018 Tensor Tang. All Rights Reserved
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
/**
* This file defines some utilities that do not depends on jitinfer itself
*/
#pragma once
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include "omp_thread.h"
#ifdef WIN32
#include <malloc.h>
#include <windows.h>
#endif
namespace jitinfer {
namespace util {
// Product of the first `num` entries of `p`, accumulated in size_t.
// An empty range (num == 0) yields 1.
template <typename T>
inline size_t array_product(const T *p, size_t num) {
  size_t product = 1;
  size_t idx = 0;
  while (idx < num) {
    product *= size_t(p[idx]);
    ++idx;
  }
  return product;
}
// True when `val` compares equal to any of the listed candidates.
// Base case: a single candidate.
template <typename T, typename P>
inline bool one_of(T val, P item) {
  return val == item;
}
// Variadic case: short-circuits on the first match, like the original
// `val == item || one_of(...)` chain.
template <typename T, typename P, typename... Args>
inline bool one_of(T val, P item, Args... item_others) {
  if (val == item) {
    return true;
  }
  return one_of(val, item_others...);
}
// True when every expression in the pack is truthy.
// Base case: a single expression.
template <typename T>
inline bool all_true(T expr) {
  return expr;
}
// Variadic case: bail out on the first falsy value, matching the
// short-circuit behavior of the original `expr && all_true(...)`.
template <typename T, typename... Args>
inline bool all_true(T expr, Args... others_expr) {
  if (!expr) {
    return false;
  }
  return all_true(others_expr...);
}
// Return `divisor` when it evenly divides `val`; otherwise 1.
inline int dividable_of(int val, int divisor) {
  return (val % divisor == 0) ? divisor : 1;
}
// Return the first listed divisor that evenly divides `val`; 1 when none do.
template <typename... Args>
inline int dividable_of(int val, int divisor, Args... others_divisor) {
  return (val % divisor == 0) ? divisor
                              : dividable_of(val, others_divisor...);
}
// Largest value d with 1 < d <= divisor that evenly divides `val`; returns 1
// when none exists.  Edge cases preserved from the recursive original:
// divisor <= 1 yields 1, and divisor > val yields val unchanged.
inline int find_dividable(int val, int divisor) {
  if (divisor <= 1) {
    return 1;
  }
  if (divisor > val) {
    return val;
  }
  // Iterative form of the original tail recursion on (val, divisor - 1).
  for (int d = divisor; d > 1; --d) {
    if (val % d == 0) {
      return d;
    }
  }
  return 1;
}
// Copy `sz` elements from src to dst with element-wise assignment.
// Deliberately not memcpy: the original avoids it in case of memory
// alignment concerns, and the loop parallelizes with OpenMP.
template <typename T>
inline void copy_array(T *dst, const T *src, size_t sz) {
#pragma omp parallel for schedule(static)
  for (size_t idx = 0; idx < sz; ++idx) {
    dst[idx] = src[idx];
  }
}
// Fill `size` slots of `arr` with `val` cast to T, in parallel.
template <typename T, typename U>
inline void set_array(T *arr, const U &val, size_t size) {
#pragma omp parallel for schedule(static)
  for (size_t k = 0; k < size; ++k) {
    arr[k] = static_cast<T>(val);
  }
}
// Minimal local re-implementation of std::remove_reference (avoids pulling
// in <type_traits>); used by forward(), zero() and div_up() in this file.
template <typename T>
struct remove_reference {
  typedef T type;
};
// Specialization: strip an lvalue reference.
template <typename T>
struct remove_reference<T &> {
  typedef T type;
};
// Specialization: strip an rvalue reference.
template <typename T>
struct remove_reference<T &&> {
  typedef T type;
};
// Ceiling division: smallest integer q with q * b >= a (for non-negative a).
// Keeps the original (a + b - 1) / b formulation, so behavior for negative
// or overflowing inputs is unchanged.
template <typename T, typename U>
inline typename remove_reference<T>::type div_up(const T a, const U b) {
  assert(b);
  const auto rounded_up = (a + b - 1) / b;
  return rounded_up;
}
// Local stand-in for std::forward: casts back to T&&, preserving the value
// category encoded in the explicitly supplied template parameter T.
template <typename T>
inline T &&forward(typename remove_reference<T>::type &t) {
  return static_cast<T &&>(t);
}
// Overload for rvalue arguments.
// NOTE(review): unlike std::forward there is no static_assert rejecting an
// rvalue-to-lvalue forward here — confirm callers never rely on that check.
template <typename T>
inline T &&forward(typename remove_reference<T>::type &&t) {
  return static_cast<T &&>(t);
}
// Value-initialized instance of T with any reference stripped
// (e.g. zero<int&>() returns int{}).
template <typename T>
inline typename remove_reference<T>::type zero() {
  return typename remove_reference<T>::type();
}
// Divide n jobs across `team` workers; worker `tid` receives the half-open
// range [n_start, n_end).  For example, 4 jobs over 3 workers gives chunk
// sizes 2, 1, 1.
template <typename T, typename U>
inline void balance211(T n, U team, U tid, T &n_start, T &n_end) {
  if (team <= 1 || n == 0) {
    // trivial split: one worker (or nothing to do) takes everything
    n_start = 0;
    n_end = n;
  } else {
    // team = T1 + T2 and n = T1*n1 + T2*n2 with n1 - n2 = 1:
    // the first T1 workers take n1 jobs each, the remaining take n2.
    const T n1 = div_up(n, (T)team);
    const T n2 = n1 - 1;
    const T T1 = n - n2 * (T)team;
    n_end = ((T)tid < T1) ? n1 : n2;
    n_start = ((T)tid <= T1) ? tid * n1 : T1 * n1 + ((T)tid - T1) * n2;
  }
  // convert (start, count) into (start, one-past-end)
  n_end += n_start;
}
// Base case: all (index, extent) pairs consumed; return the leftover offset.
template <typename T>
inline T nd_iterator_init(T start) {
  return start;
}
// Decompose the linear offset `start` into the multi-dimensional indices
// given as (x, X) pairs.  The recursion runs first, so the LAST pair in the
// argument list is the fastest-varying dimension: each level stores
// `start % X` into x and hands `start / X` to the next-slower dimension.
template <typename T, typename U, typename W, typename... Args>
inline T nd_iterator_init(T start, U &x, const W &X, Args &&... tuple) {
  start = nd_iterator_init(start, forward<Args>(tuple)...);
  x = start % X;
  return start / X;
}
// Base case: unconditionally reports a "carry" so the innermost dimension
// of the caller advances.
inline bool nd_iterator_step() { return true; }
// Advance a multi-dimensional counter by one position, last (x, X) pair
// fastest.  Returns true when the entire counter wrapped back to zero.
template <typename U, typename W, typename... Args>
inline bool nd_iterator_step(U &x, const W &X, Args &&... tuple) {
  // inner (faster) dimensions step first; only on their wrap do we advance x
  if (nd_iterator_step(forward<Args>(tuple)...)) {
    x = (x + 1) % X;
    return x == 0;  // x wrapped too: propagate the carry outward
  }
  return false;
}
// Jump the innermost dimension forward by up to (end - cur) linear steps.
// `cur` advances by however many steps were consumed; returns true when
// dimension x wrapped to 0 (i.e. a carry must propagate outward).
template <typename U, typename W, typename Y>
inline bool nd_iterator_jump(U &cur, const U end, W &x, const Y &X) {
  U max_jump = end - cur;   // linear steps still available
  U dim_jump = X - x;       // steps until this dimension wraps
  if (dim_jump <= max_jump) {
    x = 0;
    cur += dim_jump;
    return true;
  } else {
    cur += max_jump;
    x += max_jump;
    return false;
  }
}
// Multi-dimensional variant: the trailing (faster) pairs jump first; when
// they wrap, x advances by one and the carry continues outward.
template <typename U, typename W, typename Y, typename... Args>
inline bool nd_iterator_jump(
    U &cur, const U end, W &x, const Y &X, Args &&... tuple) {
  if (nd_iterator_jump(cur, end, forward<Args>(tuple)...)) {
    x = (x + 1) % X;
    return x == 0;
  }
  return false;
}
namespace timer {
// Current wall-clock time in milliseconds.
// NOTE: based on gettimeofday, so it is not monotonic — suitable for
// coarse profiling, not for scheduling.
inline double get_current_ms() {
  struct timeval now;
  gettimeofday(&now, NULL);
  double ms = 1e+3 * now.tv_sec;
  ms += now.tv_usec * 1e-3;
  return ms;
}
}
namespace env {
// Copy the environment variable `name` into `value` (at most `length`
// bytes).  NOTE(review): implementation lives in a .cpp not visible here —
// confirm the return value's meaning (presumably the copied length).
int _getenv(char *value, const char *name, int length);
// Environment-driven feature switches; the exact variable names are
// defined in the implementation file.
bool profiling_time();
bool jit_dump_code();
}
}
// Heap allocation with the requested alignment (implemented elsewhere).
void *aligned_malloc(size_t size, int alignment);
// NOTE(review): jitinfer::free shadows ::free for unqualified calls inside
// this namespace — presumably it releases aligned_malloc memory; confirm.
void free(void *p);
}
|
parallel-numthreads.c | // Test if/num_threads clause handling
#include <assert.h>
#include <stdio.h>
#include <omp.h>
// Exercises the interaction of the OpenMP if() and num_threads() clauses:
// a true if() honors num_threads(3); a false if() forces serial execution
// (team of exactly 1), overriding num_threads.
int main(void)
{
  int i=0;
  // if(i==0) is true: the region runs with the requested 3 threads
  #pragma omp parallel if(i==0) num_threads(3)
  {
    #pragma omp single
    {
      assert (omp_get_num_threads() == 3 );
    }
    printf("Mutual exclusive output 1.\n");
  }
  // if(i!=0) is false: the region executes with a team of 1 thread,
  // regardless of the num_threads(3) clause
  #pragma omp parallel if(i!=0) num_threads(3)
  {
    #pragma omp single
    {
      assert (omp_get_num_threads() == 1 );
    }
    printf("Mutual exclusive output 2.\n");
  }
  return 0;
}
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute RESULT = X - Y for `struct timeval` values.
 * Y is used as scratch space and may be modified by the carry handling.
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow from the seconds field so the usec subtraction cannot go
   * negative. */
  if (x->tv_usec < y->tv_usec) {
    int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * carry;
    y->tv_sec += carry;
  }
  /* Normalize an oversized usec gap back into whole seconds. */
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* At this point tv_usec is certainly non-negative. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  /* Negative difference iff x's seconds ended up smaller. */
  return x->tv_sec < y->tv_sec;
}
/* 3D order-1 7-point stencil benchmark.
 * argv[1..3]: interior grid size per dimension; argv[4]: time steps.
 * Fixes vs. original: (1) `min(...)` was undefined — the macro defined
 * above is MIN; (2) Nx/Ny/Nz/Nt were read uninitialized when too few
 * arguments were given; (3) the boundary planes and the second buffer
 * A[1] were never initialized but are read by the stencil. */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;

  /* Defaults (interior 32^3, 10 steps) when arguments are missing. */
  Nx = Ny = Nz = 32 + 2;
  Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* Double-buffered grid: A[t%2] is read, A[(t+1)%2] is written. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 16;
  tile_size[1] = 16;
  tile_size[2] = 4;
  tile_size[3] = 256;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;
  const double alpha = 0.0876;
  const double beta = 0.0765;

  /* Initialize the WHOLE grid — including the boundary planes, which the
   * stencil reads at i/j/k = 0 and N-1 — and mirror into the second
   * buffer so no time step reads uninitialized memory.  (The original
   * loops started at 1 and only filled A[0].) */
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = A[0][i][j][k];
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  /* num_threads is presumably consumed by PRINT_RESULTS (print_utils.h). */
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt-1; t++) {
      for (i = 1; i < Nz-1; i++) {
        for (j = 1; j < Ny-1; j++) {
          for (k = 1; k < Nx-1; k++) {
            A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
              + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
                        A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    (void) ts_return;
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    /* was lowercase `min(...)`, which is not defined anywhere */
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays (Causing performance degradation
  /* for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  */
  return 0;
}
|
Flock.h | #ifndef flock__h
#define flock__h
#include <aribeiro/aribeiro.h>
using namespace aRibeiro;
#include "Boid.h"
#include "App.h"
#include "BoidModel.h"
// Boids flock simulation: classic alignment / separation / cohesion
// steering over a double-buffered particle list, plus a user-steered
// "boidMaster" the flock is attracted to.
// NOTE(review): vec3, aligned_vector, Random, moveSlerp, etc. come from the
// aRibeiro library; their semantics are assumed from usage here.
class Flock {

    App *app;
    // scene bounding box; sceneDelta = sceneMax - sceneMin (wrap distance)
    vec3 sceneMin,sceneMax, sceneDelta;

    //double buffer
    aligned_vector<Boid> boids[2];
    int doubleBuffer;

    BoidModel *boidModel;

    // Accumulate the three classic boid terms for `boid` against every
    // other boid in `list`:
    //   neighborAlignment  - normalized sum of visible neighbors' headings
    //   neighborSeparation - inverse-square repulsion from near neighbors
    //   neighborCohesion   - unit vector toward visible neighbors' centroid
    // neighborCounter receives the number of visible (far-test) neighbors.
    void computeNeighbor(const Boid *boid, const aligned_vector< Boid > &list,
        int &neighborCounter,
        vec3 &neighborAlignment,
        vec3 &neighborSeparation,
        vec3 &neighborCohesion
    ) {
        neighborCounter = 0;
        vec3 neighborCentroid = vec3(0);
        neighborAlignment = vec3(0);
        neighborSeparation = vec3(0);
        neighborCohesion = vec3(0);
        vec3 aux;
        float dist;
        for(int i=0;i<list.size();i++){
            if (boid != &list[i]){
                // far visibility test: contributes to alignment + cohesion
                if (boid->isVisible(list[i],false)){
                    neighborCounter++;
                    neighborCentroid += list[i].pos;
                    neighborAlignment += list[i].dir;
                }
                // near visibility test: contributes to separation
                if (boid->isVisible(list[i],true)){
                    // compute separation (was: "calcular separacao")
                    aux = boid->pos - list[i].pos;
                    dist = length(aux);
                    if (dist <= 0.0002f) {
                        // nearly coincident boids: push apart along a
                        // random direction with a huge magnitude
                        aux = Random::getVec3Direction();
                        aux *= 9999.0f;
                    } else {
                        aux /= dist;
                        aux *= 1.0f/(dist*dist);
                    }
                    neighborSeparation += aux;
                }
            }
        }
        if (neighborCounter > 0){
            //neighborAlignment /= neighborCounter;
            if (sqrLength(neighborAlignment) > 1e-6f)
                neighborAlignment = normalize(neighborAlignment);
            neighborCentroid /= neighborCounter;
            neighborCohesion = neighborCentroid - boid->pos;
            if (sqrLength(neighborCohesion) > 1e-6f)
                neighborCohesion = normalize(neighborCohesion);
            else
                neighborCohesion = vec3(0);
        }
    }

public:
    // leader boid, steered by the mouse (see processBoidMasterInput)
    Boid boidMaster;

    // Creates `numBoids` boids randomly placed around the scene bounds.
    Flock(App *app, int numBoids , vec3 sceneMin, vec3 sceneMax):
        boidMaster(
            (sceneMax+sceneMin)*0.5f,  // start at the scene center
            vec3(0,0,-1.0),
            5.0f,
            0.0f,0.0f) {
        this->app = app;
        this->sceneMin = sceneMin;
        this->sceneMax = sceneMax;
        sceneDelta = sceneMax - sceneMin;
        boidModel = new BoidModel(app);
        doubleBuffer = 0;
        for(int i=0;i<numBoids;i++)
            createBoid();
    }

    ~Flock() {
        setNullAndDelete(boidModel);
    }

    // Append one randomly-initialized boid to BOTH buffers (the buffers
    // must stay the same length for the double-buffered update).
    void createBoid() {
        vec3 dir = Random::getVec3Direction();
        // flatten the initial heading mostly into the XZ plane
        dir *= vec3(1.0f,0.2f,1.0f);
        dir = normalize(dir);
        // NOTE(review): this can place boids outside [sceneMin, sceneMax]
        // (offset range is +-sceneDelta around sceneMin) — wrap-around in
        // boidsUpdate pulls them back; confirm this is intended.
        vec3 pos = sceneMin + (Random::getVec3() - vec3(0.5f))*2.0f*sceneDelta;
        float vel = Random::getFloat() * 5.0f + 5.0f;
        Boid boid = Boid(pos,dir,vel,
            4.0f,//vision distance
            120.0f//vision angle degree
        );
        boids[0].push_back(boid);
        boids[1].push_back(boid);
    }

    // One simulation step: reads buffer `source`, writes buffer `target`.
    // Parallel-safe because each iteration writes only its own target boid
    // and reads the immutable source buffer.
    void boidsUpdate(const PlatformTime &time) {
        int source = doubleBuffer;
        int target = (doubleBuffer + 1) % 2;
        #pragma omp parallel for
        for(int i=0;i<boids[target].size();i++){
            Boid &bsource = boids[source][i];
            Boid &btarget = boids[target][i];
            btarget = bsource;
            int neighborCounter;
            vec3 neighborAlignment;
            vec3 neighborSeparation;
            vec3 neighborCohesion;
            computeNeighbor(&bsource, boids[source],
                neighborCounter,
                neighborAlignment,
                neighborSeparation,
                neighborCohesion);
            // steer away from the floor/ceiling of the scene
            vec3 sceneCollision = vec3(0);
            if (btarget.pos.y < sceneMin.y){
                //sceneCollision = reflect(btarget.dir,vec3(0,1,0));
                sceneCollision = vec3(0,1,0);
                btarget.pos.y = sceneMin.y;
            }else if (btarget.pos.y > sceneMax.y) {
                sceneCollision = vec3(0,-1,0);//reflect(btarget.dir,vec3(0,1,0));
                //btarget.pos.y = sceneMax.y;
            }
            // attraction toward the leader boid
            vec3 toBoidMaster = boidMaster.pos - bsource.pos;
            float lgnht = length(toBoidMaster);
            if ( lgnht > 1e-6f)
                toBoidMaster /= lgnht;
            // weighted blend of all steering terms
            vec3 targetVel =
                neighborAlignment * 1.5f +
                neighborSeparation * 0.5f +
                neighborCohesion * 1.0f +
                sceneCollision * 1.0f
                +toBoidMaster * 0.5f
                ;
            targetVel *= 10.0f;
            if (sqrLength(targetVel) > 1e-6f){
                //vec3 aux = perpendicularComponent(targetVel, btarget.dir);
                //vec3 targetDir = normalize(btarget.dir + aux);
                vec3 targetDir = normalize(targetVel);
                float targetVelF = dot(targetVel,btarget.dir);
                if (absv(targetVelF)<2.0f)
                    targetVelF = sign(targetVelF)*2.0f;
                btarget.dir = moveSlerp(bsource.dir, targetDir, DEG2RAD(time.deltaTime*100.0f));
                btarget.vel = move(bsource.vel, targetVelF, time.deltaTime * 2.5f);
                btarget.dir = normalize(btarget.dir);
                btarget.vel = clamp(btarget.vel, 2.0f, 5.0f);
            } else {
                // no steering input: drift toward a higher speed, clamped
                btarget.vel = clamp(move(btarget.vel,btarget.vel*1.5f,time.deltaTime*10.0f), 2.0f, 5.0f);
            }
            // integrate position
            btarget.pos += btarget.dir * btarget.vel * time.deltaTime;
            /*
            if (btarget.pos.y < sceneMin.y){
                btarget.dir = reflect(btarget.dir,vec3(0,1,0));
                btarget.pos.y = sceneMin.y;
            }else if (btarget.pos.y > sceneMax.y) {
                btarget.dir = reflect(btarget.dir,vec3(0,1,0));
                btarget.pos.y = sceneMax.y;
            }*/
            // wrap around in X and Z (toroidal scene)
            if (btarget.pos.x < sceneMin.x)
                btarget.pos.x += sceneDelta.x;
            else if (btarget.pos.x > sceneMax.x)
                btarget.pos.x -= sceneDelta.x;
            if (btarget.pos.z < sceneMin.z)
                btarget.pos.z += sceneDelta.z;
            else if (btarget.pos.z > sceneMax.z)
                btarget.pos.z -= sceneDelta.z;
        }
    }

    // Per-frame entry point: advance the simulation, draw the freshly
    // written buffer, then swap buffers.
    void draw(PlatformTime &time, const mat4 &m) {
        //limit max variation to run like 60FPS
        time.deltaTime = minimum(time.deltaTime, 1.0f/24.0f);
        processBoidMasterInput(time);
        boidsUpdate(time);
        //int previousFrame = doubleBuffer;
        int currentFrame = (doubleBuffer + 1) % 2;
        for(int i=0;i<boids[currentFrame].size();i++){
            Boid &boid = boids[currentFrame][i];
            boidModel->draw(m, boid.pos, boid.dir);
        }
        doubleBuffer = (doubleBuffer + 1) % 2;
        //
        // Draw boid master
        //
        boidModel->drawBoidMaster(m, boidMaster.pos, boidMaster.dir);
    }

    // Steer boidMaster from the mouse position: x controls turning, y
    // controls climb/dive; then integrate and apply the same floor/ceiling
    // and X/Z wrap rules as the flock.
    void processBoidMasterInput(const PlatformTime &time) {
        sf::Vector2i ipos = sf::Mouse::getPosition(*app->window);
        iSize iscreen = app->WindowSize;
        ipos.y = iscreen.y - 1 - ipos.y;   // flip to bottom-left origin
        // normalize to [-1, 1] in both axes
        vec2 input = vec2((float)ipos.x / (float)(iscreen.x-1),
                          (float)ipos.y / (float)(iscreen.y-1));
        input = clamp(input, vec2(0),vec2(1));
        input = (input - 0.5f) * 2.0f;
        // dead zone around the horizontal center
        if (input.x > -0.125f && input.x < 0.125f)
            input.x = 0.0f;
        vec3 targetDir = boidMaster.dir;
        vec3 perp = cross(normalize(vec3(boidMaster.dir.x,0,boidMaster.dir.z)), vec3(0,1.0f,0));
        targetDir += perp * input.x * 0.5f;
        targetDir.y = input.y;
        targetDir = normalize(targetDir);
        boidMaster.dir = moveSlerp(boidMaster.dir, targetDir, DEG2RAD(80.0f) * time.deltaTime);
        //
        // BoidMaster calculation
        //
        boidMaster.pos += boidMaster.dir * boidMaster.vel * time.deltaTime;
        if (boidMaster.pos.y < sceneMin.y){
            boidMaster.dir.y = 0;
            boidMaster.dir = normalize(boidMaster.dir);
            boidMaster.pos.y = sceneMin.y;
        }else if (boidMaster.pos.y > sceneMax.y) {
            boidMaster.dir.y = 0;
            boidMaster.dir = normalize(boidMaster.dir);
            boidMaster.pos.y = sceneMax.y;
        }
        if (boidMaster.pos.x < sceneMin.x)
            boidMaster.pos.x += sceneDelta.x;
        else if (boidMaster.pos.x > sceneMax.x)
            boidMaster.pos.x -= sceneDelta.x;
        if (boidMaster.pos.z < sceneMin.z)
            boidMaster.pos.z += sceneDelta.z;
        else if (boidMaster.pos.z > sceneMax.z)
            boidMaster.pos.z -= sceneDelta.z;
    }

    SSE2_CLASS_NEW_OPERATOR
};
#endif
|
move_shallow_water_particle_utility.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Miguel Maso Sotomayor
// Pablo Becker
//
#ifndef KRATOS_MOVE_SHALLOW_WATER_PARTICLE_UTILITY_H_INCLUDED
#define KRATOS_MOVE_SHALLOW_WATER_PARTICLE_UTILITY_H_INCLUDED
///@defgroup MoveShallowWaterParticleUtility
///@brief Utility to move particles on the eulerian mesh with an
/// explicit scheme. This is the basic tool of the pfem2 framework
// System includes
#include <string>
#include <iostream>
#include <algorithm>
// External includes
// Project includes
#include "includes/define.h"
#include "includes/node.h"
#include "includes/checks.h"
#include "includes/dof.h"
#include "includes/variables.h"
#include "containers/array_1d.h"
#include "containers/data_value_container.h"
#include "includes/mesh.h"
#include "utilities/math_utils.h"
#include "includes/global_pointer_variables.h"
#include "processes/node_erase_process.h"
#include "utilities/geometry_utilities.h"
#include "includes/model_part.h"
#include "includes/kratos_parameters.h"
#include "spatial_containers/spatial_containers.h"
#include "spatial_containers/cell.h"
#include "spatial_containers/bins_dynamic_objects.h"
#include "utilities/spatial_containers_configure.h"
#include "geometries/line_2d_2.h"
#include "geometries/triangle_2d_3.h"
#include "geometries/triangle_3d_3.h"
#include "geometries/point.h"
#include "shallow_water_application_variables.h"
#include "shallow_water_particle.h"
#include "utilities/openmp_utils.h"
#include "time.h"
//#include "processes/process.h"
namespace Kratos
{
//this class is to be modified by the user to customize the interpolation process
template< unsigned int TDim>
class MoveShallowWaterParticleUtility
{
public:
typedef SpatialContainersConfigure<TDim> Configure;
typedef typename Configure::PointType PointType;
typedef typename Configure::ContainerType ContainerType;
typedef typename Configure::IteratorType IteratorType;
typedef typename Configure::ResultContainerType ResultContainerType;
typedef typename Configure::ResultIteratorType ResultIteratorType;
typedef PointerVector< ShallowParticle, ShallowParticle*, std::vector<ShallowParticle*> > ParticlePointerVector;
KRATOS_CLASS_POINTER_DEFINITION(MoveShallowWaterParticleUtility);
/// Constructor of the particle convection utility.
/** Validates @p rParameters against the defaults, renumbers the elements so
 * that Id == position+1 inside the container (serial on purpose), computes
 * MEAN_SIZE for every node (mean edge length to its neighbours) and every
 * element (shortest edge, used for the courant-based substepping), and
 * finally seeds the initial particles in each element, interpolating the
 * scalar/vector nodal data to the particle positions.
 * @param rModelPart  model part containing the eulerian mesh
 * @param rParameters settings block; see default_parameters below
 */
MoveShallowWaterParticleUtility(ModelPart& rModelPart, Parameters rParameters) :
    mrModelPart(rModelPart),
    mScalarVar1(KratosComponents< Variable<double> >::Get( rParameters["convection_scalar_variable"].GetString() ) ),
    mVectorVar1(KratosComponents< Variable<array_1d<double,3> > >::Get( rParameters["convection_vector_variable"].GetString() ) )
{
    KRATOS_TRY

    std::cout << "Initializing moveparticle utility for scalar transport" << std::endl;

    Parameters default_parameters( R"(
{
"convection_scalar_variable" : "HEIGHT",
"convection_vector_variable" : "VELOCITY",
"maximum_number_of_particles" : 16
} )" );
    // Now validate against defaults -- this also ensures no type mismatch
    rParameters.ValidateAndAssignDefaults(default_parameters);
    m_scalar_var1_name = rParameters["convection_scalar_variable"].GetString();
    m_vector_var1_name = rParameters["convection_vector_variable"].GetString();
    // BUGFIX: the parameter is declared as an integer (see default_parameters),
    // so it must be read with GetInt(), not GetDouble().
    mMaxNumberOfParticles = rParameters["maximum_number_of_particles"].GetInt();
    Check();

    // Renumber the elements so that their Id matches their position in the
    // array; this makes later lookups trivial.
    // DO NOT PARALELIZE THIS! IT MUST BE SERIAL!!!!!!!!!!!!!!!!!!!!!!
    ModelPart::ElementsContainerType::iterator ielembegin = mrModelPart.ElementsBegin();
    for(unsigned int ii=0; ii<mrModelPart.Elements().size(); ii++)
    {
        ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
        ielem->SetId(ii+1);
    }
    mLastElemId= (mrModelPart.ElementsEnd()-1)->Id();

    // Nodal MEAN_SIZE: mean edge length to the neighbour nodes. Could be used
    // as a weighting function when going lagrangian->eulerian instead of the
    // traditional shape functions (method currently used).
    ModelPart::NodesContainerType::iterator inodebegin = mrModelPart.NodesBegin();
    std::vector<unsigned int> node_partition;
    #ifdef _OPENMP
    int number_of_threads = omp_get_max_threads();
    #else
    int number_of_threads = 1;
    #endif
    OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Nodes().size(), node_partition);
    #pragma omp parallel for
    for(int kkk=0; kkk<number_of_threads; kkk++)
    {
        for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
        {
            ModelPart::NodesContainerType::iterator pnode = inodebegin+ii;
            array_1d<double,3> position_node;
            double distance=0.0;
            position_node = pnode->Coordinates();
            GlobalPointersVector< Node<3> >& rneigh = pnode->GetValue(NEIGHBOUR_NODES);
            // loop over all the neighbour nodes to visit every edge
            const double number_of_neighbours = static_cast<double>(rneigh.size());
            for( GlobalPointersVector<Node<3> >::iterator inode = rneigh.begin(); inode!=rneigh.end(); inode++)
            {
                array_1d<double,3> position_difference;
                position_difference = inode->Coordinates() - position_node;
                const double current_distance = norm_2( position_difference );
                distance += current_distance / number_of_neighbours;
            }
            // save the mean edge length of this node
            pnode->SetValue(MEAN_SIZE, distance);
        }
    }
    // BUGFIX: mLastNodeId was previously taken from a plain int written by every
    // thread inside the parallel loop above (a data race with a nondeterministic
    // result). Read it deterministically from the container instead, the same
    // way mLastElemId is obtained.
    mLastNodeId = (mrModelPart.NodesEnd()-1)->Id();

    // Element MEAN_SIZE: the SHORTEST edge of each element, later used to keep
    // the courant number under control during the substepping.
    std::vector<unsigned int> element_partition;
    OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Elements().size(), element_partition);
    #pragma omp parallel for
    for(int kkk=0; kkk<number_of_threads; kkk++)
    {
        for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
        {
            ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
            double elem_size;
            array_1d<double,3> Edge(3,0.0);
            Edge = ielem->GetGeometry()[1].Coordinates() - ielem->GetGeometry()[0].Coordinates();
            elem_size = Edge[0]*Edge[0];
            for (unsigned int d = 1; d < TDim; d++)
                elem_size += Edge[d]*Edge[d];
            // check every remaining node pair and keep the shortest squared edge
            for (unsigned int i = 2; i < (TDim+1); i++)
                for(unsigned int j = 0; j < i; j++)
                {
                    Edge = ielem->GetGeometry()[i].Coordinates() - ielem->GetGeometry()[j].Coordinates();
                    double Length = Edge[0]*Edge[0];
                    for (unsigned int d = 1; d < TDim; d++)
                        Length += Edge[d]*Edge[d];
                    if (Length < elem_size) elem_size = Length;
                }
            elem_size = sqrt(elem_size);
            ielem->SetValue(MEAN_SIZE, elem_size);
        }
    }

    // Matrix containing the position of the 4/15/45 particles that we will seed
    // at the beginning, plus their shape function values.
    BoundedMatrix<double, 5*(1+TDim), 3 > pos;
    BoundedMatrix<double, 5*(1+TDim), (1+TDim) > N;
    int particle_id=0;
    mNElems = mrModelPart.Elements().size();

    std::cout << " about to resize vectors" << std::endl;
    // mParticlesVector contains ALL the particles in the simulation.
    mParticlesVector.resize(mNElems*mMaxNumberOfParticles);
    // current number of particles inside each element (currently zero)
    mNumOfParticlesInElems.resize(mNElems);
    mNumOfParticlesInElems=ZeroVector(mNElems);
    // auxiliary counter needed while moving the particles (stores the previous number)
    mNumOfParticlesInElemsAux.resize(mNElems);
    // per-element vector of pointers to all the particles located inside it
    mVectorOfParticlePointersVectors.resize(mNElems);

    int i_int=0; //careful! it's not the id, but the position inside the array!
    std::cout << " about to create particles" << std::endl;
    // Seeding: LOOP IN ELEMENTS. Uses the loop index, DO NOT paralelize this!
    mOffset=0;
    for(unsigned int ii=0; ii<mrModelPart.Elements().size(); ii++)
    {
        ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
        // The pointer vector is double sized because the particles are moved
        // from an initial position (first half) to a final one (second half).
        mVectorOfParticlePointersVectors[ii] = ParticlePointerVector( mMaxNumberOfParticles*2 );
        ParticlePointerVector& particle_pointers = mVectorOfParticlePointersVectors[ii];
        int & number_of_particles = mNumOfParticlesInElems[ii];
        number_of_particles=0;
        Geometry< Node<3> >& geom = ielem->GetGeometry();
        ComputeGaussPointPositions_initial(geom, pos, N); //we also have the standard (4), and 45
        // now we seed the particles in the current element
        for (unsigned int j = 0; j < pos.size1(); j++)
        {
            ++particle_id;
            ShallowParticle& pparticle = mParticlesVector[particle_id-1];
            pparticle.Coordinates() = row(pos,j);
            pparticle.GetEraseFlag()=false;

            // interpolate the nodal data to the particle position
            array_1d<float, 3 > & vector1 = pparticle.GetVector1();
            float & scalar1 = pparticle.GetScalar1();
            noalias(vector1) = ZeroVector(3);
            scalar1=0.0;
            for (unsigned int k = 0; k < (TDim+1); k++)
            {
                scalar1 += N(j, k) * geom[k].FastGetSolutionStepValue(mScalarVar1);
                noalias(vector1) += N(j, k) * geom[k].FastGetSolutionStepValue(mVectorVar1);
            }
            particle_pointers(j) = &pparticle;
            number_of_particles++ ;
        }
        ++i_int;
    }
    // total number of particles created; for the moment they are all alive
    mNParticles=particle_id;
    std::cout << " [Creating particles : " << mNParticles << " particles created]" << std::endl;
    mParticlePrintingToolInitialized=false;
    KRATOS_CATCH("")
}
/// Destructor: nothing to release by hand, the members clean up themselves.
~MoveShallowWaterParticleUtility() = default;
/// Builds the dynamic-objects bins used for the spatial searches.
/** The elements are handed to the bins through the configure container;
 * note that the list may be shuffled during the construction of the tree.
 */
void MountBin()
{
    KRATOS_TRY

    // Hand the whole element range to a freshly built bins structure and
    // swap it into the member pointer (the old bins, if any, die here).
    ContainerType& rElements = mrModelPart.ElementsArray();
    auto pBins = typename BinsObjectDynamic<Configure>::Pointer(
        new BinsObjectDynamic<Configure>(rElements.begin(), rElements.end()));
    pBins.swap(mpBinsObjectDynamic);

    std::cout << " finished mounting Bins" << std::endl;

    KRATOS_CATCH("")
}
/// Calculates the mean velocity
/** This function computes the mean velocity within an element and
* stores it in MEAN_VEL_OVER_ELEM_SIZE variable.
* This variable keeps the courant number aprox 0.1 in each substep
*
* @see MoveParticle
* @see MoveParticleInverseWay
*/
void CalculateVelOverElemSize()
{
KRATOS_TRY
//ProcessInfo& CurrentProcessInfo = mrModelPart.GetProcessInfo();
const double nodal_weight = 1.0/ (1.0 + double (TDim) );
ModelPart::ElementsContainerType::iterator ielembegin = mrModelPart.ElementsBegin();
std::vector<unsigned int> element_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Elements().size(), element_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
{
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
Geometry<Node<3> >& geom = ielem->GetGeometry();
array_1d<double, 3 >vector_mean_velocity=ZeroVector(3);
for (unsigned int i=0; i != (TDim+1) ; i++)
vector_mean_velocity += geom[i].FastGetSolutionStepValue(VELOCITY);
vector_mean_velocity *= nodal_weight;
//~ const double mean_velocity = sqrt ( pow(vector_mean_velocity[0],2) + pow(vector_mean_velocity[1],2) + pow(vector_mean_velocity[2],2) );
const double mean_velocity = norm_2( vector_mean_velocity );
ielem->SetValue(MEAN_VEL_OVER_ELEM_SIZE, mean_velocity / ( ielem->GetValue(MEAN_SIZE) ) );
}
}
KRATOS_CATCH("")
}
/// Reset the boundary conditions
/** When a variable is fixed this function resets the nodal values
* with the previous time step
*/
void ResetBoundaryConditions()
{
KRATOS_TRY
typedef VariableComponent<VectorComponentAdaptor<array_1d<double, 3> > > component_type;
component_type vector_var_x = KratosComponents< component_type >::Get(m_vector_var1_name+std::string("_X"));
component_type vector_var_y = KratosComponents< component_type >::Get(m_vector_var1_name+std::string("_Y"));
component_type vector_var_z = KratosComponents< component_type >::Get(m_vector_var1_name+std::string("_Z"));
ModelPart::NodesContainerType::iterator inodebegin = mrModelPart.NodesBegin();
std::vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Nodes().size(), node_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
if (inode->IsFixed(mScalarVar1))
{
inode->FastGetSolutionStepValue(mScalarVar1)=inode->GetSolutionStepValue(mScalarVar1,1);
}
if (inode->IsFixed(vector_var_x))
{
inode->FastGetSolutionStepValue(vector_var_x)=inode->GetSolutionStepValue(vector_var_x,1);
}
if (inode->IsFixed(vector_var_y))
{
inode->FastGetSolutionStepValue(vector_var_y)=inode->GetSolutionStepValue(vector_var_y,1);
}
if (inode->IsFixed(vector_var_z))
{
inode->FastGetSolutionStepValue(vector_var_z)=inode->GetSolutionStepValue(vector_var_z,1);
}
}
}
KRATOS_CATCH("")
}
/// Auxiliar function to compute the "delta variables"
/** Delta variables are the difference between two time steps.
* It's value is used to update particles info
*
* @see CorrectParticlesWithoutMovingUsingDeltaVariables
*/
void CalculateDeltaVariables()
{
KRATOS_TRY
ModelPart::NodesContainerType::iterator inodebegin = mrModelPart.NodesBegin();
std::vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Nodes().size(), node_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
inode->FastGetSolutionStepValue(DELTA_SCALAR1) = inode->FastGetSolutionStepValue(mScalarVar1) - inode->FastGetSolutionStepValue(PROJECTED_SCALAR1); //PROJECTED_SCALAR1
inode->FastGetSolutionStepValue(DELTA_VECTOR1) = inode->FastGetSolutionStepValue(mVectorVar1) - inode->FastGetSolutionStepValue(PROJECTED_VECTOR1); //PROJECTED_VECTOR1
}
}
KRATOS_CATCH("")
}
/// Auxiliar function
/** This function copy a scalar variable value to the previous time step
*/
void CopyScalarVarToPreviousTimeStep(const Variable<double>& OriginVariable,
ModelPart::NodesContainerType& rNodes)
{
KRATOS_TRY
ModelPart::NodesContainerType::iterator inodebegin = rNodes.begin();
std::vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, rNodes.size(), node_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
inode->GetSolutionStepValue(OriginVariable,1) = inode->FastGetSolutionStepValue(OriginVariable);
}
}
KRATOS_CATCH("")
}
/// Auxiliar function
/** This function copy a vector variable value to the previous time step
*/
void CopyVectorVarToPreviousTimeStep(const Variable<array_1d<double,3>>& OriginVariable,
ModelPart::NodesContainerType& rNodes)
{
KRATOS_TRY
ModelPart::NodesContainerType::iterator inodebegin = rNodes.begin();
std::vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, rNodes.size(), node_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
noalias(inode->GetSolutionStepValue(OriginVariable,1)) = inode->FastGetSolutionStepValue(OriginVariable);
}
}
KRATOS_CATCH("")
}
/// Move all the particles
/** This function moves the particles across the streamlines
 * according to the velocity given by VELOCITY variable. The
 * movement is performed in nsubsteps, during a total time
 * of DELTA_TIME.
 * The per-element particle-pointer arrays are double sized: particles are
 * read from one half (selected by mOffset) and written into the other half,
 * which becomes the active one at the end of this function.
 *
 * @see MoveParticle
 */
void MoveParticles()
{
KRATOS_TRY
ProcessInfo& CurrentProcessInfo = mrModelPart.GetProcessInfo();
const int offset = mOffset; //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones.
//moveparticlesdiff reads from the pointers of one part (ie odd) and saves into the other part (ie even part)
//since it is the only function in the whole procedure that does this, it must use alternatively one part and the other.
bool even_timestep;
if (offset!=0) even_timestep=false;
else even_timestep=true;
// post_offset selects the *other* half: where the pointers are stored after the particles have been moved.
const int post_offset = mMaxNumberOfParticles * static_cast<int>(even_timestep); //and we also save the offset to know the location in which we will save the pointers after we've moved the particles
double delta_t = CurrentProcessInfo[DELTA_TIME];
array_1d<double,TDim+1> N;
const unsigned int max_results = 10000;
//double integration_distance= 2.0;
// the full DELTA_TIME is integrated in (at most) mMaxSubSteps substeps
mMaxSubSteps = 10;
mMaxSubStepDt = delta_t / static_cast<double>(mMaxSubSteps);
std::vector<unsigned int> element_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Elements().size(), element_partition);
ModelPart::ElementsContainerType::iterator ielembegin = mrModelPart.ElementsBegin();
//before doing anything we must reset the vector of nodes contained by each element (particles that are inside each element.
// The previous counters are saved into the auxiliary vector and the live counters are zeroed.
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
{
//ModelPart::ElementsContainerType::iterator old_element = ielembegin+ii;
int & number_of_particles = mNumOfParticlesInElems[ii]; //old_element->GetValue(NUMBER_OF_BED_PARTICLES);
mNumOfParticlesInElemsAux[ii] = number_of_particles;
mNumOfParticlesInElems[ii] = 0;
//we reset the local vectors for a faster access;
}
}
std::cout << "convecting particles" << std::endl;
//We move the particles across the fixed mesh and saving change data into them (using the function MoveParticle)
#pragma omp barrier
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
// thread-local scratch: spatial-search results and the chain of elements crossed by a particle
ResultContainerType results(max_results);
GlobalPointersVector< Element > elements_in_trajectory;
elements_in_trajectory.resize(20);
for(unsigned int ielem = element_partition[kkk]; ielem<element_partition[kkk+1]; ielem++)
{
ModelPart::ElementsContainerType::iterator old_element = ielembegin+ielem;
const int old_element_id = old_element->Id();
ParticlePointerVector& old_element_particle_pointers = mVectorOfParticlePointersVectors[old_element_id-1];
if ( (results.size()) != max_results )
results.resize(max_results);
unsigned int number_of_elements_in_trajectory = 0; //excluding the origin one (current one, ielem)
// loop over the particles that were inside this element BEFORE moving (aux counter)
for (int ii = 0; ii < mNumOfParticlesInElemsAux[ielem]; ii++)
{
ShallowParticle& pparticle = old_element_particle_pointers[offset+ii];
Element::Pointer pcurrent_element( *old_element.base() );
ResultIteratorType result_begin = results.begin();
bool & erase_flag=pparticle.GetEraseFlag();
if (erase_flag == false){
MoveParticle(pparticle,pcurrent_element,elements_in_trajectory,number_of_elements_in_trajectory,result_begin,max_results); //N was removed from the arguments: it is not needed since the particle ALWAYS starts at a node and the final location inside the element does not matter here
// pcurrent_element now points at the element where the particle ended up
const int current_element_id = pcurrent_element->Id();
int & number_of_particles_in_current_elem = mNumOfParticlesInElems[current_element_id-1];
if (number_of_particles_in_current_elem < mMaxNumberOfParticles && erase_flag == false)
{
ParticlePointerVector& current_element_particle_pointers = mVectorOfParticlePointersVectors[current_element_id-1];
// several threads may be registering particles into the same destination element
#pragma omp critical
{
if (number_of_particles_in_current_elem < mMaxNumberOfParticles) // we cant go over this node, there's no room. otherwise we would be in the position of the first particle of the next element!!
{
current_element_particle_pointers(post_offset+number_of_particles_in_current_elem) = &pparticle;
number_of_particles_in_current_elem++ ;
KRATOS_ERROR_IF( number_of_particles_in_current_elem > mMaxNumberOfParticles ) <<
"In move shallow water particle utility: exceeded maximum number of particles" << std::endl;
}
else
{
pparticle.GetEraseFlag()=true; //so we just delete it!
}
}
}
else
{
pparticle.GetEraseFlag()=true; //so we just delete it!
}
}
}
}
}
// After having changed everything we change the status of the mOddTimeStep flag:
mOffset = post_offset;; //
KRATOS_CATCH("")
}
/// Transfer particles information to the mesh nodes
/** This function explicitly projects data from particles (lagrangian)
 * onto the eulerian mesh. Shape functions of the elements determine
 * the particle location within the element and its contribution to
 * each node as a weighting function.
 * Results: PROJECTED_SCALAR1 / PROJECTED_VECTOR1 on the nodes, plus the
 * accumulated weight YP used for the final normalization.
 */
void TransferLagrangianToEulerian() //explicit
{
KRATOS_TRY
//ProcessInfo& CurrentProcessInfo = mrModelPart.GetProcessInfo();
//const double delta_t =CurrentProcessInfo[DELTA_TIME];
// weights below this threshold are replaced by a tiny positive value so a node never divides by ~0
const double threshold = 1e-10 / (static_cast<double>(TDim)+1.0);
std::cout << "projecting info to mesh" << std::endl;
const int offset = mOffset;
// the array of pointers for each element has twice the required size so that
// we use a part in odd timesteps and the other in even ones.
//(flag managed only by MoveParticles)
// We must project data from the particles (lagrangian) onto the eulerian mesh
//int nnodes = mrModelPart.Nodes().size();
//array_1d<double,(n_nodes)> eulerian_nodes_sumweights;
// We save data from previous time step of the eulerian mesh in case we must reuse it later
// cos no particle was found around the nodes though we could've use a bigger buffer, to be changed later!
// after having saved data, we reset them to zero, this way it's easier to add the contribution
// of the surrounding particles.
ModelPart::NodesContainerType::iterator inodebegin = mrModelPart.NodesBegin();
std::vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Nodes().size(), node_partition);
// Stage 1: zero the projection variables and the weight accumulator on every node.
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
inode->FastGetSolutionStepValue(PROJECTED_SCALAR1)=0.0;
inode->FastGetSolutionStepValue(PROJECTED_VECTOR1)=ZeroVector(3);
inode->FastGetSolutionStepValue(YP)=0.0;
}
}
// Stage 2: adding contribution, loop on elements, since each element has stored the particles found inside of it
std::vector<unsigned int> element_partition;
OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Elements().size(), element_partition);
ModelPart::ElementsContainerType::iterator ielembegin = mrModelPart.ElementsBegin();
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
{
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
// local accumulators: contributions of this element's particles to its own nodes
array_1d<double,3*(TDim+1)> nodes_positions;
array_1d<double,3*(TDim+1)> nodes_added_vector1 = ZeroVector(3*(TDim+1));
array_1d<double,(TDim+1)> nodes_added_scalar1 = ZeroVector((TDim+1));
array_1d<double,(TDim+1)> nodes_added_weights = ZeroVector((TDim+1));
//array_1d<double,(TDim+1)> weighting_inverse_divisor;
Geometry<Node<3> >& geom = ielem->GetGeometry();
for (int i=0 ; i!=(TDim+1) ; ++i)
{
nodes_positions[i*3+0]=geom[i].X();
nodes_positions[i*3+1]=geom[i].Y();
nodes_positions[i*3+2]=geom[i].Z();
//weighting_inverse_divisor[i]=1.0/((geom[i].FastGetSolutionStepValue(MEAN_SIZE))*1.01);
}
int & number_of_particles_in_elem= mNumOfParticlesInElems[ii];
ParticlePointerVector& element_particle_pointers = mVectorOfParticlePointersVectors[ii];
for (int iii=0; iii<number_of_particles_in_elem ; iii++ )
{
if (iii==mMaxNumberOfParticles) // It means we are out of our portion of the array, abort loop!
break;
ShallowParticle& pparticle = element_particle_pointers[offset+iii];
if (pparticle.GetEraseFlag()==false)
{
array_1d<double,3> & position = pparticle.Coordinates();
const float& particle_scalar1 = pparticle.GetScalar1();
const array_1d<float,3>& particle_vector1 = pparticle.GetVector1();
// shape function values of this particle inside the element
array_1d<double,TDim+1> N;
bool is_found = CalculatePosition(nodes_positions,position[0],position[1],position[2],N);
if (is_found==false) // Something went wrong. if it was close enough to the edge we simply send it inside the element.
{
KRATOS_INFO("MoveShallowWaterParticleUtility") << N << std::endl;
for (int j=0 ; j!=(TDim+1); j++)
if (N[j]<0.0 && N[j]> -1e-5)
N[j]=1e-10;
}
for (int j=0 ; j!=(TDim+1); j++) //going through the 3/4 nodes of the element
{
// These lines for a weighting function based on the distance (or square distance) from the node insteadof the shape functions
//double sq_dist = 0;
//for (int k=0 ; k!=(TDim); k++) sq_dist += ((position[k] - nodes_positions[j*3+k])*(position[k] - nodes_positions[j*3+k]));
//double weight = (1.0 - (sqrt(sq_dist)*weighting_inverse_divisor[j] ) );
// weight = squared shape function value at the particle position
double weight=N(j)*N(j);
//weight=N(j)*N(j)*N(j);
if (weight<threshold) weight=1e-10;
nodes_added_weights[j] += weight;
nodes_added_scalar1[j] += weight*static_cast<double>(particle_scalar1);
for (int k=0 ; k!=(TDim); k++) //x,y,(z)
{
nodes_added_vector1[j*3+k] += weight * static_cast<double>(particle_vector1[k]);
}
}
}
}
// flush the local accumulators into the shared nodal database under the node locks
for (int i=0 ; i!=(TDim+1) ; ++i) {
geom[i].SetLock();
geom[i].FastGetSolutionStepValue(PROJECTED_SCALAR1) += nodes_added_scalar1[i];
geom[i].FastGetSolutionStepValue(PROJECTED_VECTOR1_X) += nodes_added_vector1[3*i+0];
geom[i].FastGetSolutionStepValue(PROJECTED_VECTOR1_Y) += nodes_added_vector1[3*i+1];
geom[i].FastGetSolutionStepValue(PROJECTED_VECTOR1_Z) += nodes_added_vector1[3*i+2];
geom[i].FastGetSolutionStepValue(YP) += nodes_added_weights[i];
geom[i].UnSetLock();
}
}
}
// Stage 3: normalize the accumulated values by the total weight of each node.
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
double sum_weights = inode->FastGetSolutionStepValue(YP);
if (sum_weights>0.00001)
{
double & scalar = inode->FastGetSolutionStepValue(PROJECTED_SCALAR1);
array_1d<double,3> & vector = inode->FastGetSolutionStepValue(PROJECTED_VECTOR1);
scalar /=sum_weights; // resetting the scalar1
vector /=sum_weights; // resetting the vector1
}
else // This should never happen because other ways to recover the information have been executed before, but leaving it just in case..
{
inode->FastGetSolutionStepValue(PROJECTED_SCALAR1)=inode->FastGetSolutionStepValue(mScalarVar1,1); // Resetting the convected scalar
inode->FastGetSolutionStepValue(PROJECTED_VECTOR1)=inode->FastGetSolutionStepValue(mVectorVar1,1); // Resetting the convected vector
}
}
}
KRATOS_CATCH("")
}
/// Update all the particles without moving them
/** This function updates all the particles' scalar and vector values using
 * the nodal "delta variables" (difference between two consecutive time
 * steps), interpolated at each particle position.
 *
 * @see CorrectParticleUsingDeltaVariables
 * @see CalculateDeltaVariables
 */
void CorrectParticlesWithoutMovingUsingDeltaVariables()
{
    KRATOS_TRY

    // The array of pointers for each element has twice the required size: one
    // half is used in odd timesteps and the other in even ones (mOffset is
    // managed only by MoveParticles).
    const int offset = mOffset;

    ModelPart::ElementsContainerType::iterator ielembegin = mrModelPart.ElementsBegin();

    std::vector<unsigned int> element_partition;
    #ifdef _OPENMP
    int number_of_threads = omp_get_max_threads();
    #else
    int number_of_threads = 1;
    #endif
    OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Elements().size(), element_partition);

    #pragma omp parallel for
    for(int kkk=0; kkk<number_of_threads; kkk++)
    {
        for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
        {
            ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
            Element::Pointer pelement(*ielem.base());
            Geometry<Node<3> >& geom = ielem->GetGeometry();

            int & number_of_particles_in_elem = mNumOfParticlesInElems[ii];
            ParticlePointerVector& element_particle_pointers = mVectorOfParticlePointersVectors[ii];
            for (int iii=0; iii<number_of_particles_in_elem ; iii++ )
            {
                // BUGFIX: the guard used '>' so the iteration with
                // iii == mMaxNumberOfParticles still read one slot past this
                // half of the double-sized pointer array. Use '>=' like the
                // equivalent guard in TransferLagrangianToEulerian.
                if (iii>=mMaxNumberOfParticles) //it means we are out of our portion of the array, abort loop!
                    break;

                ShallowParticle & pparticle = element_particle_pointers[offset+iii];
                if (pparticle.GetEraseFlag()==false)
                {
                    // 'lite' version: we pass the geometry by reference, much cheaper
                    CorrectParticleUsingDeltaVariables(pparticle,pelement,geom);
                }
            }
        }
    }
    KRATOS_CATCH("")
}
/// Fill an element with particles
/** This function is to be executed after moving particles and
 * before transferring data from lagrangian particles to the eulerian mesh.
 * If an element finishes with less particles than "minimum number
 * of particles", then PreReseed adds particles inside it.
 * A minimal reseed is performed in order to not disturb the projection
 * from lagrangian to eulerian.
 *
 * @param MinimumNumberOfParticles elements holding fewer active particles than this are refilled
 *
 * @see MoveParticles
 * @see MoveParticleInverseWay: is called to get the particle values
 */
void PreReseed(int MinimumNumberOfParticles)
{
KRATOS_TRY
//ProcessInfo& CurrentProcessInfo = mrModelPart.GetProcessInfo();
// offset selects the currently active half of each element's particle-pointer
// vector (mOffset implements a double buffer over those vectors).
const int offset =mOffset;
// maximum number of candidate elements a single bin search may return
const int max_results = 1000;
// tools for the parallelization: split the element range into consecutive
// chunks, one per thread; the last chunk also absorbs the remainder of the
// integer division.
unsigned int number_of_threads = OpenMPUtils::GetNumThreads();
std::vector<unsigned int> elem_partition;
int number_of_rows = mrModelPart.Elements().size();
elem_partition.resize(number_of_threads + 1);
int elem_partition_size = number_of_rows / number_of_threads;
elem_partition[0] = 0;
elem_partition[number_of_threads] = number_of_rows;
//KRATOS_WATCH(elem_partition_size);
for (unsigned int i = 1; i < number_of_threads; i++)
elem_partition[i] = elem_partition[i - 1] + elem_partition_size;
ModelPart::ElementsContainerType::iterator ielembegin = mrModelPart.ElementsBegin();
#pragma omp parallel firstprivate(elem_partition)
{
// thread-local scratch buffer for the bin searches
ResultContainerType results(max_results);
int k = OpenMPUtils::ThisThread();
//ModelPart::ElementsContainerType::iterator it_begin = mrModelPart.ElementsBegin() + elem_partition[k];
//ModelPart::ElementsContainerType::iterator it_end = mrModelPart.ElementsBegin() + elem_partition[k+1] ;
//ModelPart::NodesContainerType local_list=aux[k];
//PointerVectorSet<ShallowParticle, IndexedObject> & list=aux[k];
// positions and shape function values of the candidate seeding points
BoundedMatrix<double, (TDim+1), 3 > pos;
BoundedMatrix<double, (TDim+1) , (TDim+1) > N;
unsigned int freeparticle=0; //we start with the first position in the particles array
//int local_id=1;
for(unsigned int ii=elem_partition[k]; ii<elem_partition[k+1]; ii++)
{
//const int & elem_id = ielem->Id();
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
results.resize(max_results);
//const int & elem_id = ielem->Id();
//ParticlePointerVector& element_particle_pointers = (ielem->GetValue(BED_PARTICLE_POINTERS));
//int & number_of_particles_in_elem=ielem->GetValue(NUMBER_OF_BED_PARTICLES);
int & number_of_particles_in_elem = mNumOfParticlesInElems[ii];
ParticlePointerVector& element_particle_pointers = mVectorOfParticlePointersVectors[ii];
// only elements that became under-populated after the convection step are refilled
if (number_of_particles_in_elem < (MinimumNumberOfParticles)) // && (ielem->GetGeometry())[0].Y()<0.10 )
{
Geometry< Node<3> >& geom = ielem->GetGeometry();
ComputeGaussPointPositionsForPreReseed(geom, pos, N);
for (unsigned int j = 0; j < (pos.size1()); j++) // I am dropping the last one, the one in the middle of the element
{
// Find a free slot (a particle flagged for erasure) in the global particle
// array and claim it. The flag is re-checked inside the critical section
// because another thread may have claimed the same slot in between.
// NOTE(review): the first read of the flag happens outside the critical
// section; confirm this benign-race claiming pattern is intended here.
bool keep_looking = true;
while(keep_looking)
{
if (mParticlesVector[freeparticle].GetEraseFlag()==true)
{
#pragma omp critical
{
if (mParticlesVector[freeparticle].GetEraseFlag()==true)
{
mParticlesVector[freeparticle].GetEraseFlag()=false;
keep_looking=false;
}
}
if (keep_looking==false)
break;
else
freeparticle++;
}
else
freeparticle++;
}
// Create the new particle at the seeding point and check it lies inside the element
ShallowParticle pparticle(pos(j,0),pos(j,1),pos(j,2));
array_1d<double,TDim+1>aux2_N;
bool is_found = CalculatePosition(geom,pos(j,0),pos(j,1),pos(j,2),aux2_N);
KRATOS_ERROR_IF_NOT( is_found ) <<
"In move shallow water particle utility: particle not found in domain" << std::endl;
pparticle.GetEraseFlag()=false;
ResultIteratorType result_begin = results.begin();
// pelement is a local copy on purpose: MoveParticleInverseWay may overwrite it
Element::Pointer pelement( *ielem.base() );
// integrate backwards along the velocity field to assign the particle values
MoveParticleInverseWay(pparticle, pelement, result_begin, max_results);
//and we copy it to the array:
mParticlesVector[freeparticle] = pparticle;
// register the new particle in the element's active particle list
// NOTE(review): no bound check against the per-element capacity here —
// presumably MinimumNumberOfParticles stays well below mMaxNumberOfParticles; verify.
element_particle_pointers(offset+number_of_particles_in_elem) = &mParticlesVector[freeparticle];
pparticle.GetEraseFlag()=false;
number_of_particles_in_elem++;
}
}
}
}
KRATOS_CATCH("")
}
/// Fill an element with particles
/** This function is to be executed after the mesh stage solver is
 * called and the particles are updated.
 * If an element contains less particles than "minimum number of
 * particles", then PostReseed adds particles inside it.
 * A full reseed is performed and the particle gets its convected
 * variables directly from the eulerian mesh.
 *
 * @param MinimumNumberOfParticles elements holding fewer active particles than this are refilled
 *
 * @see PreReseed
 */
void PostReseed(int MinimumNumberOfParticles) //pooyan's way
{
KRATOS_TRY
//ProcessInfo& CurrentProcessInfo = mrModelPart.GetProcessInfo();
// currently active half of each element's particle-pointer vector (double buffer)
const int offset = mOffset;
// tools for the parallelization: split the element range into consecutive
// chunks, one per thread (the last chunk absorbs the division remainder)
unsigned int number_of_threads = OpenMPUtils::GetNumThreads();
std::vector<unsigned int> elem_partition;
int number_of_rows=mrModelPart.Elements().size();
//KRATOS_THROW_ERROR(std::logic_error, "Add ----NODAL_H---- variable!!!!!! ERROR", "");
elem_partition.resize(number_of_threads + 1);
int elem_partition_size = number_of_rows / number_of_threads;
elem_partition[0] = 0;
elem_partition[number_of_threads] = number_of_rows;
for (unsigned int i = 1; i < number_of_threads; i++)
elem_partition[i] = elem_partition[i - 1] + elem_partition_size;
ModelPart::ElementsContainerType::iterator ielembegin = mrModelPart.ElementsBegin();
#pragma omp parallel firstprivate(elem_partition) // firstprivate(results)//we will add the nodes in different parts of aux and later assemble everything together, renaming particle ids to get consecutive ids
{
unsigned int reused_particles=0; // statistics only: slots recycled by this thread
unsigned int freeparticle = 0; //we start by the first position;
int k = OpenMPUtils::ThisThread();
BoundedMatrix<double, (3+2*TDim), 3 > pos; //7 particles (2D) or 9 particles (3D)
BoundedMatrix<double, (3+2*TDim), (TDim+1) > N;
double mesh_scalar1;
array_1d<double,3> mesh_vector1;
array_1d<int, (3+2*TDim) > positions; // NOTE(review): never used in this function
unsigned int number_of_reseeded_particles;
for(unsigned int ii=elem_partition[k]; ii<elem_partition[k+1]; ii++)
{
//const int & elem_id = ielem->Id();
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
int & number_of_particles_in_elem = mNumOfParticlesInElems[ii];
ParticlePointerVector& element_particle_pointers = mVectorOfParticlePointersVectors[ii];
Geometry< Node<3> >& geom = ielem->GetGeometry();
if ( number_of_particles_in_elem < (MinimumNumberOfParticles) ) // && (geom[0].Y()<0.10) ) || (number_of_water_particles_in_elem>2 && number_of_particles_in_elem<(MinimumNumberOfParticles) ) )
{
//bool reseed_more=false;
number_of_reseeded_particles = 0;
//reseed_more=true;
// full reseed: one particle per sampling point of the element
number_of_reseeded_particles = 3 + 2*TDim;
ComputeGaussPointPositionsForPostReseed(geom, pos, N);
for (unsigned int j = 0; j < number_of_reseeded_particles; j++)
{
// Now we have to find an empty space (a particle that was about to be deleted) in the
// particles model part. once found. there will be our renewed particle:
// The erase flag is re-checked inside the critical section because another
// thread may have claimed the same slot in between.
bool keep_looking = true;
while(keep_looking)
{
if (mParticlesVector[freeparticle].GetEraseFlag()==true)
{
#pragma omp critical
{
if (mParticlesVector[freeparticle].GetEraseFlag()==true)
{
mParticlesVector[freeparticle].GetEraseFlag()=false;
keep_looking=false;
}
}
if (keep_looking==false)
break;
else
freeparticle++;
}
else
freeparticle++;
}
ShallowParticle pparticle(pos(j,0),pos(j,1),pos(j,2));
// sanity check: the seeding point must lie inside the element
array_1d<double,TDim+1>aux_N;
bool is_found = CalculatePosition(geom,pos(j,0),pos(j,1),pos(j,2),aux_N);
KRATOS_ERROR_IF_NOT( is_found ) <<
"In move shallow water particle utility: particle not found in domain" << std::endl;
// interpolate the convected variables directly from the eulerian mesh
mesh_scalar1 = 0.0;
mesh_vector1 = ZeroVector(3);
for (unsigned int l = 0; l < (TDim+1); l++)
{
mesh_scalar1 += N(j,l) * geom[l].FastGetSolutionStepValue(mScalarVar1);
noalias(mesh_vector1) += N(j, l) * geom[l].FastGetSolutionStepValue(mVectorVar1);
}
pparticle.GetScalar1()=mesh_scalar1;
pparticle.GetVector1()=mesh_vector1;
pparticle.GetEraseFlag()=false;
mParticlesVector[freeparticle]=pparticle;
element_particle_pointers(offset+number_of_particles_in_elem) = &mParticlesVector[freeparticle];
number_of_particles_in_elem++;
// NOTE(review): keep_looking is always false here (the while loop above only
// exits after clearing it), so this guard can never fire as written.
KRATOS_ERROR_IF( keep_looking ) <<
"In move shallow water particle utility: Finished the list and couldnt find a free cell for the new particle!" << std::endl;
reused_particles++;
}
}
}
}
KRATOS_CATCH("")
}
/// Fill a model part with particles
/** This function prints the particles to a model part: one lagrangian node
 * per printed particle, carrying the particle scalar value and, in
 * DISPLACEMENT, the particle position.
 * On the first call the (required empty) model part is filled with enough
 * nodes to host the maximum possible number of printed particles; on every
 * call the unused nodes are parked at a fixed inactive position.
 *
 * @param rLagrangianModelPart empty model part to print particles into
 * @param FilterFactor the function will print one particle of every "filter factor"
 */
void ExecuteParticlesPrintingTool( ModelPart& rLagrangianModelPart, unsigned int FilterFactor )
{
    KRATOS_TRY
    // We will only print one out of every "filter factor" particles of the total particle list
    if (mParticlePrintingToolInitialized == false)
    {
        // BUGFIX: the original check was (NodesBegin() - NodesEnd() > 0), which
        // can never be true for a valid container (begin <= end), so the
        // "model part must be empty" precondition was silently skipped.
        // Check the node count directly instead.
        KRATOS_ERROR_IF( rLagrangianModelPart.Nodes().size() != 0 ) <<
        "In move shallow water particle utility: an empty model part is required for the particles printing tool" << std::endl;
        rLagrangianModelPart.AddNodalSolutionStepVariable(mScalarVar1);
        rLagrangianModelPart.AddNodalSolutionStepVariable(DISPLACEMENT);
        // Allocate enough nodes for the worst-case number of printed particles
        for (unsigned int i = 0; i != ((mMaxNumberOfParticles*mNElems)/FilterFactor) + FilterFactor; i++)
        {
            // Note: ids are created in the NEW (lagrangian) model part
            Node < 3 > ::Pointer pnode = rLagrangianModelPart.CreateNewNode( i+mLastNodeId+1 , 0.0, 0.0, 0.0);
            // only the current step is needed on the printing nodes
            pnode->SetBufferSize(1);
        }
        mParticlePrintingToolInitialized = true;
    }
    // Resetting data of the unused particles: park every node far from the domain
    const double inactive_particle_position = -10.0;
    array_1d<double,3> inactive_particle_position_vector;
    inactive_particle_position_vector(0) = inactive_particle_position;
    inactive_particle_position_vector(1) = inactive_particle_position;
    inactive_particle_position_vector(2) = inactive_particle_position;
    ModelPart::NodesContainerType::iterator inodebegin = rLagrangianModelPart.NodesBegin();
    for (unsigned int ii = 0; ii < rLagrangianModelPart.Nodes().size(); ii++)
    {
        ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
        inode->FastGetSolutionStepValue(mScalarVar1) = 0.0;
        inode->FastGetSolutionStepValue(DISPLACEMENT) = inactive_particle_position_vector;
    }
    // Copy the info of one out of every FilterFactor active particles to the nodes
    int counter = 0;
    for (int i = 0; i != mMaxNumberOfParticles*mNElems; i++)
    {
        ShallowParticle& pparticle = mParticlesVector[i];
        if (pparticle.GetEraseFlag() == false && i%FilterFactor == 0)
        {
            ModelPart::NodesContainerType::iterator inode = inodebegin + counter; //copying info from the particle to the (printing) node.
            inode->FastGetSolutionStepValue(mScalarVar1) = pparticle.GetScalar1();
            inode->FastGetSolutionStepValue(DISPLACEMENT) = pparticle.Coordinates();
            counter++;
        }
    }
    KRATOS_CATCH("")
}
protected:
private:
/// Move a particle
/** This function convects a particle forward along the nodal VELOCITY
 * field during a total time of DELTA_TIME, split into substeps sized so
 * that the substep Courant number is about 0.1 (via MEAN_VEL_OVER_ELEM_SIZE).
 * If the particle leaves the mesh at any substep its erase flag is set so
 * the reseeding stage can recycle it.
 *
 * @param pParticle the particle to convect (its coordinates are updated in place)
 * @param pElement in: element the particle starts in; out: element it ends in
 * @param rElementsInTrajectory elements already crossed by previous particles of the same element (search hint)
 * @param rNumberOfElementsInTrajectory number of valid entries in rElementsInTrajectory
 * @param ResultBegin scratch buffer for the bin search
 * @param MaxNumberOfResults capacity of the scratch buffer
 *
 * @see MoveParticles
 */
void MoveParticle(ShallowParticle & pParticle,
                  Element::Pointer & pElement,
                  GlobalPointersVector< Element >& rElementsInTrajectory,
                  unsigned int & rNumberOfElementsInTrajectory,
                  ResultIteratorType ResultBegin,
                  const unsigned int MaxNumberOfResults)
{
    ProcessInfo& CurrentProcessInfo = mrModelPart.GetProcessInfo();
    double delta_t = CurrentProcessInfo[DELTA_TIME];
    unsigned int nsubsteps;
    double substep_dt;
    bool keep_integrating = false;
    bool is_found;
    array_1d<double,3> vel;
    array_1d<double,3> position;
    array_1d<double,TDim+1> N;
    // CLEANUP: the unused locals of the original implementation
    // (vel_without_other_phase_nodes, mid_position and the write-only
    // only_integral accumulator) were removed; behavior is unchanged.
    position = pParticle.Coordinates(); //initial coordinates
    is_found = FindNodeOnMesh(position, N, pElement, ResultBegin, MaxNumberOfResults); //good, now we know where this point is:
    if (is_found == true)
    {
        keep_integrating = true;
        Geometry< Node<3> >& geom = pElement->GetGeometry(); //the element we're in
        vel = ZeroVector(3);
        for (unsigned int j = 0; j < (TDim+1); j++)
        {
            noalias(vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j];
        }
        //calculating substep to get +- courant(substep) = 0.1
        nsubsteps = 10.0 * (delta_t * pElement->GetValue(MEAN_VEL_OVER_ELEM_SIZE));
        if (nsubsteps < 1)
            nsubsteps = 1;
        substep_dt = delta_t / double(nsubsteps);
        position += vel*substep_dt;
        // DONE THE FIRST LOCATION OF THE PARTICLE, NOW WE PROCEED TO STREAMLINE INTEGRATION USING THE MESH VELOCITY
        unsigned int check_from_element_number = 0;
        for (unsigned int i = 0; i < (nsubsteps-1); i++) // substeps n+1..: in the first one we already knew the position of the particle
        {
            if (keep_integrating == true)
            {
                is_found = FindNodeOnMesh(position, N, pElement, rElementsInTrajectory, rNumberOfElementsInTrajectory, check_from_element_number, ResultBegin, MaxNumberOfResults);
                if (is_found == true)
                {
                    Geometry< Node<3> >& geom = pElement->GetGeometry(); //the element we're in
                    vel = ZeroVector(3);
                    for (unsigned int j = 0; j < (TDim+1); j++)
                    {
                        noalias(vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j];
                    }
                    position += vel*substep_dt;
                }
                else
                {
                    keep_integrating = false;
                    break;
                }
            }
            else
                break;
        }
    }
    // Particles that left the domain are flagged so the reseeding can recycle them
    if (keep_integrating == false) pParticle.GetEraseFlag() = true;
    else is_found = FindNodeOnMesh(position, N, pElement, ResultBegin, MaxNumberOfResults); //we must save the pointer of the last element that we're in (inside the pointervector pElement)
    if (is_found == false) pParticle.GetEraseFlag() = true;
    pParticle.Coordinates() = position;
}
/// This function updates a particle
/** This function updates a particle's scalar and vector values by
 * interpolating the nodal "delta variables" (DELTA_SCALAR1 / DELTA_VECTOR1)
 * of the given geometry at the particle position.
 *
 * @param pParticle the particle to update (values are modified in place)
 * @param pElement unused; kept for interface compatibility with existing callers
 * @param rGeom the geometry of the element containing the particle
 *
 * @see CorrectParticlesWithoutMovingUsingDeltaVariables
 */
void CorrectParticleUsingDeltaVariables(ShallowParticle & pParticle,
                                        Element::Pointer & pElement,
                                        Geometry< Node<3> >& rGeom)
{
    array_1d<double,TDim+1> N;
    array_1d<double,3> coords = pParticle.Coordinates();
    float & particle_scalar1 = pParticle.GetScalar1();
    array_1d<float,3> & particle_vector1 = pParticle.GetVector1();
    double delta_scalar1 = 0.0;
    array_1d<double,3> delta_vector1 = ZeroVector(3);
    bool is_found = CalculatePosition(rGeom,coords[0],coords[1],coords[2],N);
    if (is_found == false)
    {
        // The particle fell (slightly) outside the element: log the shape
        // functions and clamp the negative ones so the interpolation below
        // degrades gracefully instead of extrapolating with negative weights.
        KRATOS_INFO("MoveShallowWaterParticleUtility") << N << std::endl;
        // CONSISTENCY FIX: loop index made unsigned like every other shape
        // function loop in this file (removes a signed/unsigned comparison).
        for (unsigned int j = 0; j < (TDim+1); j++)
            if (N[j] < 0.0)
                N[j] = 1e-10;
    }
    for (unsigned int j = 0; j < (TDim+1); j++)
    {
        delta_scalar1 += rGeom[j].FastGetSolutionStepValue(DELTA_SCALAR1)*N[j];
        noalias(delta_vector1) += rGeom[j].FastGetSolutionStepValue(DELTA_VECTOR1)*N[j];
    }
    particle_scalar1 = particle_scalar1 + delta_scalar1;
    particle_vector1 = particle_vector1 + delta_vector1;
}
/// Move a particle in the inverse way
/** This function moves a particle according to the negative of the VELOCITY
 * variable: a backward integration in nsubsteps during a total time of
 * DELTA_TIME. At every substep the eulerian values (mScalarVar1, mVectorVar1)
 * are interpolated, so the last values sampled before the particle leaves
 * the mesh (or the integration ends) are stored in the particle.
 * NOTE(review): despite the original comment, pElement IS passed by
 * reference and may be overwritten — callers such as PreReseed pass a
 * local Element::Pointer copy on purpose.
 *
 * @param pParticle the particle whose values are assigned
 * @param pElement in: starting element; may be overwritten during the search
 * @param ResultBegin scratch buffer for the bin search
 * @param MaxNumberOfResults capacity of the scratch buffer
 *
 * @see PreReseed
 */
void MoveParticleInverseWay(ShallowParticle & pParticle,
                            Element::Pointer & pElement,
                            ResultIteratorType ResultBegin,
                            const unsigned int MaxNumberOfResults)
{
    ProcessInfo& CurrentProcessInfo = mrModelPart.GetProcessInfo();
    double delta_t = CurrentProcessInfo[DELTA_TIME];
    unsigned int nsubsteps;
    double substep_dt;
    bool keep_integrating = false;
    bool is_found;
    double scalar1 = 0.0;
    array_1d<double,3> vector1;
    array_1d<double,3> vel;
    array_1d<double,3> position;
    array_1d<double,TDim+1> N;
    // CLEANUP: unused mid_position and the write-only only_integral
    // accumulator of the original implementation were removed.
    position = pParticle.Coordinates(); //initial coordinates
    is_found = FindNodeOnMesh(position, N, pElement, ResultBegin, MaxNumberOfResults); //good, now we know where this point is:
    if (is_found == true)
    {
        keep_integrating = true;
        Geometry< Node<3> >& geom = pElement->GetGeometry(); //the element we're in
        scalar1 = 0.0;
        vector1 = ZeroVector(3);
        vel = ZeroVector(3);
        for (unsigned int j = 0; j < (TDim+1); j++)
        {
            scalar1 += geom[j].FastGetSolutionStepValue(mScalarVar1)*N[j];
            noalias(vector1) += geom[j].FastGetSolutionStepValue(mVectorVar1)*N[j];
            noalias(vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j];
        }
        //calculating substep size from the mean element Courant estimate
        nsubsteps = 10.0 * (delta_t * pElement->GetValue(MEAN_VEL_OVER_ELEM_SIZE));
        if (nsubsteps < 1)
            nsubsteps = 1;
        substep_dt = delta_t / double(nsubsteps);
        position -= vel*substep_dt; // backward step
        for (unsigned int i = 0; i < (nsubsteps-1); i++) // substeps n+1..: in the first one we already knew the position of the particle
        {
            if (keep_integrating == true)
            {
                is_found = FindNodeOnMesh(position, N, pElement, ResultBegin, MaxNumberOfResults);
                if (is_found == true)
                {
                    Geometry< Node<3> >& geom = pElement->GetGeometry(); //the element we're in
                    scalar1 = 0.0;
                    vector1 = ZeroVector(3);
                    vel = ZeroVector(3);
                    for (unsigned int j = 0; j < (TDim+1); j++)
                    {
                        // CONSISTENCY FIX: N[j] is used for all three fields (was N(j) for the scalar)
                        scalar1 += geom[j].FastGetSolutionStepValue(mScalarVar1)*N[j];
                        noalias(vector1) += geom[j].FastGetSolutionStepValue(mVectorVar1)*N[j];
                        noalias(vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j];
                    }
                    position -= vel*substep_dt;
                }
                else keep_integrating = false;
            }
        }
        pParticle.GetScalar1() = scalar1;
        pParticle.GetVector1() = vector1;
    }
}
/// Find the element into which a given node is located
/** Locates the element containing rPosition, trying in order:
 * (1) the element the particle was last known to be in,
 * (2) the neighbours of that element, and
 * (3) a bin-based spatial search over candidate elements.
 * On success pElement is updated and N holds the shape function values
 * at rPosition. If false is returned the element is not found.
 *
 * @param rPosition position of the node
 * @param N return shape functions that define the positions within the elem
 * @param pElement in: last known element; out: the element found
 * @param ResultBegin scratch buffer for the bin search results
 * @param MaxNumberOfResults capacity of the scratch buffer
 * @return true if the element is found
 *
 * @see CalculatePosition
 */
bool FindNodeOnMesh( const array_1d<double,3>& rPosition,
                     array_1d<double,TDim+1>& N,
                     Element::Pointer & pElement,
                     ResultIteratorType ResultBegin,
                     const unsigned int MaxNumberOfResults)
{
    typedef std::size_t SizeType;
    // CLEANUP: the unused local aux_N of the original implementation was removed.
    // (1) Before using the bin, check the last element the particle was in —
    // cheap, and successful for most particles.
    Geometry<Node<3> >& geom_default = pElement->GetGeometry();
    bool is_found_1 = CalculatePosition(geom_default,rPosition[0],rPosition[1],rPosition[2],N);
    if (is_found_1) //that was easy!
    {
        return true;
    }
    // (2) Check the neighbour elements; it is a bit more expensive
    GlobalPointersVector< Element >& neighb_elems = pElement->GetValue(NEIGHBOUR_ELEMENTS);
    for (unsigned int i = 0; i != (neighb_elems.size()); i++)
    {
        Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry();
        bool is_found_2 = CalculatePosition(geom,rPosition[0],rPosition[1],rPosition[2],N);
        if (is_found_2)
        {
            pElement = neighb_elems[i].shared_from_this();
            return true;
        }
    }
    // (3) If checking all the neighbour elements did not work, use the bins:
    // ask the container for the list of candidate elements in the cell
    SizeType results_found = mpBinsObjectDynamic->SearchObjectsInCell(Point{rPosition}, ResultBegin, MaxNumberOfResults );
    if (results_found > 0)
    {
        //loop over the candidate elements and check if the particle falls within
        for (SizeType i = 0; i < results_found; i++)
        {
            Geometry<Node<3> >& geom = (*(ResultBegin + i))->GetGeometry();
            bool is_found_3 = CalculatePosition(geom,rPosition[0],rPosition[1],rPosition[2],N);
            if (is_found_3)
            {
                pElement = (*(ResultBegin + i))->shared_from_this();
                return true;
            }
        }
    }
    // not found anywhere: the position lies outside the mesh
    return false;
}
/// Find the element into which a given node is located
/** This function should find the element into which a given node
* is located and return a pointer to the element and the vector
* containing the shape functions that define the positions within
* the element.
* If false is returned the element is not found
* This version includes predefined elements following a trajectory
*
* @param rPosition of the node
* @param N Output shape functions that define the positions within the elem
* @param pElement Output a pointer to the element
* @param rElementsInTrajectory
* @param rNumberOfElementsInTrajectory Output
* @param CheckFromElementNumber
* @param ResultBegin
* @param MaxNumberOfResults
* @return FindNodeOnMesh if the element is found of not
*
* @see CalculatePosition
*/
bool FindNodeOnMesh( const array_1d<double,3>& rPosition,
array_1d<double,TDim+1>& N,
Element::Pointer & pElement,
GlobalPointersVector< Element >& rElementsInTrajectory,
unsigned int & rNumberOfElementsInTrajectory,
unsigned int & rCheckFromElementNumber,
ResultIteratorType ResultBegin,
const unsigned int MaxNumberOfResults)
{
typedef std::size_t SizeType;
//~ const array_1d<double,3>& coords = rPosition;
array_1d<double,TDim+1> aux_N;
//before using the bin to search for possible elements we check first the last element in which the particle was.
Geometry<Node<3> >& geom_default = pElement->GetGeometry(); //(*(i))->GetGeometry();
bool is_found_1 = CalculatePosition(geom_default,rPosition[0],rPosition[1],rPosition[2],N);
if(is_found_1 == true)
{
return true; //that was easy!
}
// If it was not found in the first element, we can proceed to check in the following elements (in the trajectory defined by previous particles that started from the same element.
for (unsigned int i=(rCheckFromElementNumber);i!=rNumberOfElementsInTrajectory;i++)
{
Geometry<Node<3> >& geom = rElementsInTrajectory[i].GetGeometry();
bool is_found_2 = CalculatePosition(geom,rPosition[0],rPosition[1],rPosition[2],aux_N);
if (is_found_2)
{
pElement = rElementsInTrajectory[i].shared_from_this();
N = aux_N;
rCheckFromElementNumber = i+1 ; //now i element matches pElement, so to avoid cheching twice the same element we send the counter to the following element.
return true;
}
}
// Now we check the neighbour elements:
GlobalPointersVector< Element >& neighb_elems = pElement->GetValue(NEIGHBOUR_ELEMENTS);
for (unsigned int i=0;i!=(neighb_elems.size());i++)
{
Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry();
bool is_found_2 = CalculatePosition(geom,rPosition[0],rPosition[1],rPosition[2],N);
if (is_found_2)
{
pElement = neighb_elems[i].shared_from_this();
if (rNumberOfElementsInTrajectory<20)
{
rElementsInTrajectory(rNumberOfElementsInTrajectory) = pElement;
rNumberOfElementsInTrajectory++;
rCheckFromElementNumber = rNumberOfElementsInTrajectory; //we do it after doing the ++ to the counter, so we woudlnt enter the loop that searches in the rElementsInTrajectory list. we are the particle that is adding elements to the list
}
return true;
}
}
// If checking all the neighbour elements did not work, we have to use the bins
// ask to the container for the list of candidate elements
SizeType results_found = mpBinsObjectDynamic->SearchObjectsInCell(Point{rPosition}, ResultBegin, MaxNumberOfResults );
if(results_found>0)
{
//loop over the candidate elements and check if the particle falls within
for(SizeType i = 0; i< results_found; i++)
{
Geometry<Node<3> >& geom = (*(ResultBegin + i))->GetGeometry();
//find local position
bool is_found = CalculatePosition(geom,rPosition[0],rPosition[1],rPosition[2],N);
if (is_found)
{
pElement = (*(ResultBegin + i))->shared_from_this();
if (rNumberOfElementsInTrajectory<20)
{
rElementsInTrajectory(rNumberOfElementsInTrajectory) = pElement;
rNumberOfElementsInTrajectory++;
rCheckFromElementNumber = rNumberOfElementsInTrajectory; //we do it after doing the ++ to the counter, so we woudlnt enter the loop that searches in the rElementsInTrajectory list. we are the particle that is adding elements to the list
}
return true;
}
}
}
//not found case
return false;
}
/// Calculate the position of a given particle inside an element
/** This function calculates the position of a given particle inside
* an element and returns the shape functions that define it position
* within the element and returns false if the particle is otuside
* the element
*
* @param rGeom: the element (a triangle)
* @param xc: the postition of the particle
* @param yc: the postition of the particle
* @param zc: the postition of the particle
* @param N: the shape functions to define the particle position
*
* @return CalculatePosition
*/
inline bool CalculatePosition( const Geometry<Node < 3 > >&rGeom,
const double xc,
const double yc,
const double zc,
array_1d<double,3> & N )
{
double x0 = rGeom[0].X();
double y0 = rGeom[0].Y();
double x1 = rGeom[1].X();
double y1 = rGeom[1].Y();
double x2 = rGeom[2].X();
double y2 = rGeom[2].Y();
double area = CalculateVol(x0, y0, x1, y1, x2, y2);
KRATOS_ERROR_IF( area == 0.0 ) << "In move shallow water particle utility: element with zero area found" << std::endl;
double inv_area = 1.0 / area;
N[0] = CalculateVol(x1, y1, x2, y2, xc, yc) * inv_area;
N[1] = CalculateVol(x2, y2, x0, y0, xc, yc) * inv_area;
N[2] = CalculateVol(x0, y0, x1, y1, xc, yc) * inv_area;
if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0)
//if the xc yc is inside the triangle return true
return true;
return false;
}
/// Calculate the position of a given particle inside an element
/** This function calculates the position of a given particle inside
* an element and returns the shape functions that define it position
* within the element and returns false if the particle is otuside
* the element
*
* @param rNodesPositions of the element (a triangle)
* @param xc: the postition of the particle
* @param yc: the postition of the particle
* @param zc: the postition of the particle
* @param N: the shape functions to define the particle position
*
* @return CalculatePosition
*/
inline bool CalculatePosition( const array_1d<double,3*(TDim+1)>& rNodesPositions,
const double xc,
const double yc,
const double zc,
array_1d<double,3> & N )
{
const double& x0 = rNodesPositions[0];
const double& y0 = rNodesPositions[1];
const double& x1 = rNodesPositions[3];
const double& y1 = rNodesPositions[4];
const double& x2 = rNodesPositions[6];
const double& y2 = rNodesPositions[7];
double area = CalculateVol(x0, y0, x1, y1, x2, y2);
KRATOS_ERROR_IF( area == 0.0 ) << "In move shallow water particle utility: element with zero area found" << std::endl;
double inv_area = 1.0 / area;
N[0] = CalculateVol(x1, y1, x2, y2, xc, yc) * inv_area;
N[1] = CalculateVol(x2, y2, x0, y0, xc, yc) * inv_area;
N[2] = CalculateVol(x0, y0, x1, y1, xc, yc) * inv_area;
if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0)
//if the xc yc is inside the triangle return true
return true;
return false;
}
/// Calculate the position of a given particle inside an element
/** This function calculates the position of a given particle inside
* an element and returns the shape functions that define it position
* within the element and returns false if the particle is otuside
* the element
*
* @param rGeom: the element (a tetrahedron)
* @param xc: the postition of the particle
* @param yc: the postition of the particle
* @param zc: the postition of the particle
* @param N: the shape functions to define the particle position
*
* @return CalculatePosition
*/
inline bool CalculatePosition( const Geometry<Node < 3 > >&rGeom,
const double xc,
const double yc,
const double zc,
array_1d<double, 4 > & N )
{
double x0 = rGeom[0].X();
double y0 = rGeom[0].Y();
double z0 = rGeom[0].Z();
double x1 = rGeom[1].X();
double y1 = rGeom[1].Y();
double z1 = rGeom[1].Z();
double x2 = rGeom[2].X();
double y2 = rGeom[2].Y();
double z2 = rGeom[2].Z();
double x3 = rGeom[3].X();
double y3 = rGeom[3].Y();
double z3 = rGeom[3].Z();
double vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3);
KRATOS_ERROR_IF( vol == 0.0 ) << "In move shallow water particle utility: element with zero vol found" << std::endl;
double inv_vol = 1.0 / vol;
N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc) * inv_vol;
N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc) * inv_vol;
N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc) * inv_vol;
N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc) * inv_vol;
if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[3] >= 0.0 &&
N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0 && N[3] <= 1.0)
//if the xc yc zc is inside the tetrahedron return true
return true;
return false;
}
/// Calculate the position of a given particle inside an element
/** This function calculates the position of a given particle inside
* an element and returns the shape functions that define it position
* within the element and returns false if the particle is otuside
* the element
*
* @param rNodesPositions of the element (a tetrahedron)
* @param xc: the postition of the particle
* @param yc: the postition of the particle
* @param zc: the postition of the particle
* @param N: the shape functions to define the particle position
*
* @return CalculatePosition
*/
inline bool CalculatePosition( const array_1d<double,3*(TDim+1)>& rNodesPositions,
const double xc,
const double yc,
const double zc,
array_1d<double, 4 > & N )
{
const double& x0 = rNodesPositions[0];
const double& y0 = rNodesPositions[1];
const double& z0 = rNodesPositions[2];
const double& x1 = rNodesPositions[3];
const double& y1 = rNodesPositions[4];
const double& z1 = rNodesPositions[5];
const double& x2 = rNodesPositions[6];
const double& y2 = rNodesPositions[7];
const double& z2 = rNodesPositions[8];
const double& x3 = rNodesPositions[9];
const double& y3 = rNodesPositions[10];
const double& z3 = rNodesPositions[11];
double vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3);
KRATOS_ERROR_IF( vol == 0.0 ) << "In move shallow water particle utility: element with zero vol found" << std::endl;
double inv_vol = 1.0 / vol;
N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc) * inv_vol;
N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc) * inv_vol;
N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc) * inv_vol;
N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc) * inv_vol;
if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[3] >= 0.0 &&
N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0 && N[3] <= 1.0)
//if the xc yc zc is inside the tetrahedron return true
return true;
return false;
}
/// Calculate the volume
/** Signed area of the triangle (x0,y0)-(x1,y1)-(x2,y2): half the
 * z-component of the cross product (P1 - P0) x (P2 - P0).
 * Positive for counter-clockwise node ordering.
 */
inline double CalculateVol( const double x0, const double y0,
                            const double x1, const double y1,
                            const double x2, const double y2 )
{
    const double ax = x1 - x0;
    const double ay = y1 - y0;
    const double bx = x2 - x0;
    const double by = y2 - y0;
    return 0.5 * (ax * by - ay * bx);
}
/// Calculate the volume
/** Signed volume of the tetrahedron with vertices P0..P3: one sixth of the
 * scalar triple product (P1 - P0) . ((P2 - P0) x (P3 - P0)).
 */
inline double CalculateVol( const double x0, const double y0, const double z0,
                            const double x1, const double y1, const double z1,
                            const double x2, const double y2, const double z2,
                            const double x3, const double y3, const double z3 )
{
    // Edge vectors from vertex 0
    const double ux = x1 - x0, uy = y1 - y0, uz = z1 - z0;
    const double vx = x2 - x0, vy = y2 - y0, vz = z2 - z0;
    const double wx = x3 - x0, wy = y3 - y0, wz = z3 - z0;
    // Determinant of the Jacobian (expansion kept in the original term order
    // so the floating point result is bit-identical)
    const double detJ = ux * vy * wz - ux * wy * vz
                      + uy * vz * wx - uy * vx * wz
                      + uz * vx * wy - uz * vy * wx;
    return detJ * 0.1666666666666666666667;
}
/// Compute the Gauss points
/** Fills the first four rows of pos / N with sampling positions inside the
 * triangle geom (three points biased towards the corners plus the centroid)
 * and the corresponding shape function values.
 * NOTE(review): despite their names, one_sixt is 0.15 and two_third is 0.7
 * (the weights still sum to 1); confirm the deviation from the exact
 * 1/6 and 2/3 values is intentional.
 * Only rows 0..3 of the 7-row matrices are written.
 */
void ComputeGaussPointPositions_4( Geometry< Node < 3 > >& geom,
BoundedMatrix<double, 7, 3 > & pos,
BoundedMatrix<double, 7, 3 > & N )
{
double one_third = 1.0 / 3.0;
double one_sixt = 0.15; //1.0 / 6.0;
double two_third = 0.7; //2.0 * one_third;
// shape function values of the three nodes at each sampling point
N(0, 0) = one_sixt;
N(0, 1) = one_sixt;
N(0, 2) = two_third;
N(1, 0) = two_third;
N(1, 1) = one_sixt;
N(1, 2) = one_sixt;
N(2, 0) = one_sixt;
N(2, 1) = two_third;
N(2, 2) = one_sixt;
N(3, 0) = one_third;
N(3, 1) = one_third;
N(3, 2) = one_third;
// physical coordinates of each sampling point (weighted nodal coordinates)
//first
pos(0, 0) = one_sixt * geom[0].X() + one_sixt * geom[1].X() + two_third * geom[2].X();
pos(0, 1) = one_sixt * geom[0].Y() + one_sixt * geom[1].Y() + two_third * geom[2].Y();
pos(0, 2) = one_sixt * geom[0].Z() + one_sixt * geom[1].Z() + two_third * geom[2].Z();
//second
pos(1, 0) = two_third * geom[0].X() + one_sixt * geom[1].X() + one_sixt * geom[2].X();
pos(1, 1) = two_third * geom[0].Y() + one_sixt * geom[1].Y() + one_sixt * geom[2].Y();
pos(1, 2) = two_third * geom[0].Z() + one_sixt * geom[1].Z() + one_sixt * geom[2].Z();
//third
pos(2, 0) = one_sixt * geom[0].X() + two_third * geom[1].X() + one_sixt * geom[2].X();
pos(2, 1) = one_sixt * geom[0].Y() + two_third * geom[1].Y() + one_sixt * geom[2].Y();
pos(2, 2) = one_sixt * geom[0].Z() + two_third * geom[1].Z() + one_sixt * geom[2].Z();
//fourth (the centroid)
pos(3, 0) = one_third * geom[0].X() + one_third * geom[1].X() + one_third * geom[2].X();
pos(3, 1) = one_third * geom[0].Y() + one_third * geom[1].Y() + one_third * geom[2].Y();
pos(3, 2) = one_third * geom[0].Z() + one_third * geom[1].Z() + one_third * geom[2].Z();
}
/// Compute the Gauss points
/** For a triangle: fills all seven rows of N with shape-function
 * values (three corner-biased points, the barycenter and three
 * edge-biased points) and pos with the corresponding physical
 * coordinates inside the element.
 * NOTE(review): the weights 0.12 / 0.76 / 0.44 are hand-tuned, not
 * classical quadrature weights (the stale comments mention 1/6 and
 * 2/3, and the name "one_eight" does not match 0.12); every row of
 * N still sums to 1.0, so the points lie inside the triangle.
 *
 * @see PostReseed
 */
void ComputeGaussPointPositionsForPostReseed( Geometry< Node < 3 > >& geom,
BoundedMatrix<double, 7, 3 > & pos,
BoundedMatrix<double, 7, 3 > & N ) //2d
{
double one_third = 1.0 / 3.0;
double one_eight = 0.12; //1.0 / 6.0;
double three_quarters = 0.76; //2.0 * one_third;
// Rows 0-2: one point biased towards each corner (0.12 + 0.12 + 0.76 = 1).
N(0, 0) = one_eight;
N(0, 1) = one_eight;
N(0, 2) = three_quarters;
N(1, 0) = three_quarters;
N(1, 1) = one_eight;
N(1, 2) = one_eight;
N(2, 0) = one_eight;
N(2, 1) = three_quarters;
N(2, 2) = one_eight;
// Row 3: barycenter.
N(3, 0) = one_third;
N(3, 1) = one_third;
N(3, 2) = one_third;
// Rows 4-6: one point biased towards each edge (0.12 + 0.44 + 0.44 = 1).
N(4, 0) = one_eight;
N(4, 1) = 0.44;
N(4, 2) = 0.44;
N(5, 0) = 0.44;
N(5, 1) = one_eight;
N(5, 2) = 0.44;
N(6, 0) = 0.44;
N(6, 1) = 0.44;
N(6, 2) = one_eight;
//first
pos(0, 0) = one_eight * geom[0].X() + one_eight * geom[1].X() + three_quarters * geom[2].X();
pos(0, 1) = one_eight * geom[0].Y() + one_eight * geom[1].Y() + three_quarters * geom[2].Y();
pos(0, 2) = one_eight * geom[0].Z() + one_eight * geom[1].Z() + three_quarters * geom[2].Z();
//second
pos(1, 0) = three_quarters * geom[0].X() + one_eight * geom[1].X() + one_eight * geom[2].X();
pos(1, 1) = three_quarters * geom[0].Y() + one_eight * geom[1].Y() + one_eight * geom[2].Y();
pos(1, 2) = three_quarters * geom[0].Z() + one_eight * geom[1].Z() + one_eight * geom[2].Z();
//third
pos(2, 0) = one_eight * geom[0].X() + three_quarters * geom[1].X() + one_eight * geom[2].X();
pos(2, 1) = one_eight * geom[0].Y() + three_quarters * geom[1].Y() + one_eight * geom[2].Y();
pos(2, 2) = one_eight * geom[0].Z() + three_quarters * geom[1].Z() + one_eight * geom[2].Z();
//fourth
pos(3, 0) = one_third * geom[0].X() + one_third * geom[1].X() + one_third * geom[2].X();
pos(3, 1) = one_third * geom[0].Y() + one_third * geom[1].Y() + one_third * geom[2].Y();
pos(3, 2) = one_third * geom[0].Z() + one_third * geom[1].Z() + one_third * geom[2].Z();
//fifth
pos(4, 0) = one_eight * geom[0].X() + 0.44 * geom[1].X() + 0.44 * geom[2].X();
pos(4, 1) = one_eight * geom[0].Y() + 0.44 * geom[1].Y() + 0.44 * geom[2].Y();
pos(4, 2) = one_eight * geom[0].Z() + 0.44 * geom[1].Z() + 0.44 * geom[2].Z();
//sixth
pos(5, 0) = 0.44 * geom[0].X() + one_eight * geom[1].X() + 0.44 * geom[2].X();
pos(5, 1) = 0.44 * geom[0].Y() + one_eight * geom[1].Y() + 0.44 * geom[2].Y();
pos(5, 2) = 0.44 * geom[0].Z() + one_eight * geom[1].Z() + 0.44 * geom[2].Z();
//seventh
pos(6, 0) = 0.44 * geom[0].X() + 0.44 * geom[1].X() + one_eight * geom[2].X();
pos(6, 1) = 0.44 * geom[0].Y() + 0.44 * geom[1].Y() + one_eight * geom[2].Y();
pos(6, 2) = 0.44 * geom[0].Z() + 0.44 * geom[1].Z() + one_eight * geom[2].Z();
}
/// Compute the Gauss points
/** For a tetrahedron: fills the nine rows of N with shape-function
 * values (four corner-biased points, the barycenter and four
 * face-biased points) and accumulates the physical coordinates of
 * each point into pos.
 * NOTE(review): the weights are hand-tuned, not classical quadrature
 * weights (each row sums to 1.0: 0.7 + 3*0.1, 4*0.25, 0.1 + 3*0.3);
 * the stale "//2.0 * one_third" comments are leftovers.
 *
 * @see PostReseed
 */
void ComputeGaussPointPositionsForPostReseed( Geometry< Node < 3 > >& geom,
BoundedMatrix<double, 9, 3 > & pos,
BoundedMatrix<double, 9, 4 > & N ) //3D
{
double one_quarter = 0.25;
double small_fraction = 0.1; //1.0 / 6.0;
double big_fraction = 0.7; //2.0 * one_third;
double mid_fraction = 0.3; //2.0 * one_third;
// Rows 0-3: one point biased towards each of the four nodes.
N(0, 0) = big_fraction;
N(0, 1) = small_fraction;
N(0, 2) = small_fraction;
N(0, 3) = small_fraction;
N(1, 0) = small_fraction;
N(1, 1) = big_fraction;
N(1, 2) = small_fraction;
N(1, 3) = small_fraction;
N(2, 0) = small_fraction;
N(2, 1) = small_fraction;
N(2, 2) = big_fraction;
N(2, 3) = small_fraction;
N(3, 0) = small_fraction;
N(3, 1) = small_fraction;
N(3, 2) = small_fraction;
N(3, 3) = big_fraction;
// Row 4: barycenter of the tetrahedron.
N(4, 0) = one_quarter;
N(4, 1) = one_quarter;
N(4, 2) = one_quarter;
N(4, 3) = one_quarter;
// Rows 5-8: one point biased towards each face (away from one node).
N(5, 0) = small_fraction;
N(5, 1) = mid_fraction;
N(5, 2) = mid_fraction;
N(5, 3) = mid_fraction;
N(6, 0) = mid_fraction;
N(6, 1) = small_fraction;
N(6, 2) = mid_fraction;
N(6, 3) = mid_fraction;
N(7, 0) = mid_fraction;
N(7, 1) = mid_fraction;
N(7, 2) = small_fraction;
N(7, 3) = mid_fraction;
N(8, 0) = mid_fraction;
N(8, 1) = mid_fraction;
N(8, 2) = mid_fraction;
N(8, 3) = small_fraction;
// pos(j,:) = sum_i N(j,i) * node_i, accumulated node by node.
pos=ZeroMatrix(9,3);
for (unsigned int i=0; i!=4; i++) //going through the 4 nodes
{
array_1d<double, 3 > & coordinates = geom[i].Coordinates();
for (unsigned int j=0; j!=9; j++) //going through the 9 particles
{
for (unsigned int k=0; k!=3; k++) //x,y,z
pos(j,k) += N(j,i) * coordinates[k];
}
}
}
/// Compute the Gauss points
/** For a triangle: creates three interpolation points, each biased
 * towards one node (weight 0.5 on "its" node, 0.25 on the other two,
 * so each row of N sums to 1.0), and stores their physical
 * coordinates in pos.
 *
 * @see PreReseed
 */
void ComputeGaussPointPositionsForPreReseed( Geometry< Node < 3 > >& geom,
BoundedMatrix<double, 3, 3 > & pos,
BoundedMatrix<double, 3, 3 > & N ) //2D
{
    // One particle per node: the diagonal carries the dominant weight.
    for (unsigned int particle = 0; particle < 3; particle++)
        for (unsigned int node = 0; node < 3; node++)
            N(particle, node) = (particle == node) ? 0.5 : 0.25;
    // Physical position of each particle from its shape functions.
    for (unsigned int particle = 0; particle < 3; particle++)
    {
        pos(particle, 0) = N(particle, 0) * geom[0].X() + N(particle, 1) * geom[1].X() + N(particle, 2) * geom[2].X();
        pos(particle, 1) = N(particle, 0) * geom[0].Y() + N(particle, 1) * geom[1].Y() + N(particle, 2) * geom[2].Y();
        pos(particle, 2) = N(particle, 0) * geom[0].Z() + N(particle, 1) * geom[1].Z() + N(particle, 2) * geom[2].Z();
    }
}
/// Compute the Gauss points
/** For a tetrahedron: creates four interpolation points, each biased
 * towards one node (weight 0.4 on "its" node, 0.2 on the other three,
 * so each row of N sums to 1.0), and accumulates their physical
 * coordinates into pos.
 *
 * @see PreReseed
 */
void ComputeGaussPointPositionsForPreReseed( Geometry< Node < 3 > >& geom,
BoundedMatrix<double, 4, 3 > & pos,
BoundedMatrix<double, 4, 4 > & N ) //3D
{
//creating 4 particles, each will be closer to a node and equidistant to the other nodes
N(0, 0) = 0.4;
N(0, 1) = 0.2;
N(0, 2) = 0.2;
N(0, 3) = 0.2;
N(1, 0) = 0.2;
N(1, 1) = 0.4;
N(1, 2) = 0.2;
N(1, 3) = 0.2;
N(2, 0) = 0.2;
N(2, 1) = 0.2;
N(2, 2) = 0.4;
N(2, 3) = 0.2;
N(3, 0) = 0.2;
N(3, 1) = 0.2;
N(3, 2) = 0.2;
N(3, 3) = 0.4;
// pos(j,:) = sum_i N(j,i) * node_i, accumulated node by node.
pos=ZeroMatrix(4,3);
for (unsigned int i=0; i!=4; i++) //going through the 4 nodes
{
array_1d<double, 3 > & coordinates = geom[i].Coordinates();
for (unsigned int j=0; j!=4; j++) //going through the 4 particles
{
for (unsigned int k=0; k!=3; k++) //x,y,z
pos(j,k) += N(j,i) * coordinates[k];
}
}
}
/// Compute the Gauss points
/** Creates a regular triangular lattice of 45 points inside the
 * triangle (rows of 9, 8, ..., 1 points; 9+8+...+1 = 45, matching
 * the 45-row matrices). The first two shape functions step in
 * increments of 0.1 starting at 0.05 and the third takes the
 * remainder, so every row of N sums to 1.0 and all entries stay
 * strictly positive.
 */
void ComputeGaussPointPositions_45( Geometry< Node < 3 > >& geom,
BoundedMatrix<double, 45, 3 > & pos,
BoundedMatrix<double, 45, 3 > & N )
{
unsigned int counter=0;
for (unsigned int i=0; i!=9;i++)
{
for (unsigned int j=0; j!=(9-i);j++) // shrinking rows keep the points inside the triangle
{
N(counter,0)=0.05+double(i)*0.1;
N(counter,1)=0.05+double(j)*0.1;
N(counter,2)=1.0 - ( N(counter,1)+ N(counter,0) ) ; // remainder: shape functions sum to 1
pos(counter, 0) = N(counter,0) * geom[0].X() + N(counter,1) * geom[1].X() + N(counter,2) * geom[2].X();
pos(counter, 1) = N(counter,0) * geom[0].Y() + N(counter,1) * geom[1].Y() + N(counter,2) * geom[2].Y();
pos(counter, 2) = N(counter,0) * geom[0].Z() + N(counter,1) * geom[1].Z() + N(counter,2) * geom[2].Z();
counter++;
}
}
}
/// Compute the Gauss points
/** Creates the initial seeding lattice of 15 points inside a triangle
 * (rows of 5, 4, 3, 2, 1 points; 5+4+3+2+1 = 15, matching the 15-row
 * matrices). The first two shape functions step in increments of 0.2
 * starting at 0.05 and the third takes the remainder, so each row of
 * N sums to 1.0 and all entries stay strictly positive.
 */
void ComputeGaussPointPositions_initial( Geometry< Node < 3 > >& geom,
BoundedMatrix<double, 15, 3 > & pos,
BoundedMatrix<double, 15, 3 > & N ) //2D
{
unsigned int counter=0;
for (unsigned int i=0; i!=5;i++)
{
for (unsigned int j=0; j!=(5-i);j++) // shrinking rows keep the points inside the triangle
{
N(counter,0)=0.05+double(i)*0.2;
N(counter,1)=0.05+double(j)*0.2;
N(counter,2)=1.0 - ( N(counter,1)+ N(counter,0) ) ; // remainder: shape functions sum to 1
pos(counter, 0) = N(counter,0) * geom[0].X() + N(counter,1) * geom[1].X() + N(counter,2) * geom[2].X();
pos(counter, 1) = N(counter,0) * geom[0].Y() + N(counter,1) * geom[1].Y() + N(counter,2) * geom[2].Y();
pos(counter, 2) = N(counter,0) * geom[0].Z() + N(counter,1) * geom[1].Z() + N(counter,2) * geom[2].Z();
counter++;
}
}
}
/// Compute the Gauss points
/** Creates the initial seeding lattice of 20 points inside a
 * tetrahedron, built layer by layer as shrinking triangles of
 * 10 + 6 + 3 + 1 points (matching the 20-row matrices). The first
 * three shape functions step in increments of 0.27 offset by 0.175
 * (tuned so the fourth, remainder shape function stays strictly
 * positive at every lattice point) and the fourth takes the
 * remainder so every row of N sums to 1.0.
 */
void ComputeGaussPointPositions_initial( Geometry< Node < 3 > >& geom,
BoundedMatrix<double, 20, 3 > & pos,
BoundedMatrix<double, 20, 4 > & N ) //3D
{
double fraction_increment;
unsigned int counter=0;
for (unsigned int i=0; i!=4;i++) //going to build a particle "pyramid"(tetrahedra) by layers. the first layer will be made by a triangle of 4 base X 4 height. since it is a triangle, it means it will have 10 particles
{
for (unsigned int j=0; j!=(4-i);j++)
{
for (unsigned int k=0; k!=(4-i-j);k++)
{
N(counter,0)= 0.27 * ( 0.175 + double(i) ) ; //this is our "surface" in which we will build each layer, so we must construct a triangle using what's left of the shape functions total (a total of 1)
//total = 1.0 - N(counter,0);
fraction_increment = 0.27; //
N(counter,1)=fraction_increment * (0.175 + double(j));
N(counter,2)=fraction_increment * (0.175 + double(k));
N(counter,3)=1.0 - ( N(counter,0)+ N(counter,1) + N(counter,2) ) ; // remainder: shape functions sum to 1
pos(counter, 0) = N(counter,0) * geom[0].X() + N(counter,1) * geom[1].X() + N(counter,2) * geom[2].X() + N(counter,3) * geom[3].X();
pos(counter, 1) = N(counter,0) * geom[0].Y() + N(counter,1) * geom[1].Y() + N(counter,2) * geom[2].Y() + N(counter,3) * geom[3].Y();
pos(counter, 2) = N(counter,0) * geom[0].Z() + N(counter,1) * geom[1].Z() + N(counter,2) * geom[2].Z() + N(counter,3) * geom[3].Z();
counter++;
}
}
}
}
/// check function
/** Verifies that every nodal variable this utility reads or writes
 * is registered in the nodal data. Only the first node of the model
 * part is inspected (all nodes share the same variable list).
 * @return 0 on success; the KRATOS_CHECK_* macros throw if a
 *         required variable is missing.
 */
virtual int Check()
{
KRATOS_TRY
Node<3>& rnode = *mrModelPart.NodesBegin();
KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(mVectorVar1, rnode)
KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(mScalarVar1, rnode)
KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(VELOCITY, rnode)
KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(DELTA_VECTOR1, rnode)
KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(DELTA_SCALAR1, rnode)
KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(PROJECTED_VECTOR1, rnode)
KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(PROJECTED_SCALAR1, rnode)
KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(MEAN_SIZE, rnode)
KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(YP, rnode)
return 0;
KRATOS_CATCH("") // closes the try block opened by KRATOS_TRY
}
/// Member variables
ModelPart& mrModelPart;              // model part whose particles this utility manages
int mNParticles;                     // target number of particles per element
int mNElems;                         // number of elements in the model part
int mOffset;                         // particle-index offset (presumably toggled each step with mOddTimeStep -- TODO confirm)
int mMaxSubSteps;                    // maximum substeps when moving particles
double mMaxSubStepDt;                // maximum time increment per substep
int mMaxNumberOfParticles;           // capacity bound for mParticlesVector
std::vector< ShallowParticle > mParticlesVector; // storage for all particles
int mLastElemId;                     // highest element id seen (used presumably for printing/reseeding -- TODO confirm)
bool mOddTimeStep;                   // parity flag for alternating time steps
bool mParticlePrintingToolInitialized; // lazy-init guard for the printing tool
unsigned int mLastNodeId;            // highest node id seen
DenseVector<int> mNumOfParticlesInElems;    // per-element particle counts
DenseVector<int> mNumOfParticlesInElemsAux; // scratch copy of the per-element counts
DenseVector<ParticlePointerVector> mVectorOfParticlePointersVectors; // per-element particle pointer lists
typename BinsObjectDynamic<Configure>::Pointer mpBinsObjectDynamic;  // spatial search structure
Variable<double> mScalarVar1;               // scalar variable transported by the particles
Variable<array_1d<double,3>> mVectorVar1;   // vector variable transported by the particles
std::string m_scalar_var1_name;             // name of the scalar variable (for lookup/printing)
std::string m_vector_var1_name;             // name of the vector variable (for lookup/printing)
}; // class MoveShallowWaterParticleUtility
} // namespace Kratos.
#endif // KRATOS_MOVE_SHALLOW_WATER_PARTICLE_UTILITY_H_INCLUDED defined
|
Fig_10.6_taskDep.c | // sample compile command: "gcc -fopenmp -c Fig_10.6_taskDep.c" to generate *.o object file
#include <omp.h>
// functions Awork through Ework not shown
/* Builds the task graph A -> {B, C}, and D consuming B, C and E.
 * A and E have no inputs; B and C each wait for A; the final task
 * waits for B, C and E via depend(in:B,C,E).
 */
int main()
{
    float A, B, C, D, E;
    #pragma omp parallel shared(A, B, C, D, E)
    {
        #pragma omp single
        {
            #pragma omp task depend(out:A)
                Awork(&A);
            #pragma omp task depend(out:E)
                Ework(&E);
            #pragma omp task depend(in:A) depend(out:B)
                Bwork(&B);
            #pragma omp task depend(in:A) depend(out:C)
                Cwork(&C);
            /* BUG FIX: was Dwork(&E), which overwrote E while D -- declared
             * above and the obvious sink of the B/C/E dependences -- was
             * never computed. */
            #pragma omp task depend(in:B,C,E)
                Dwork(&D);
        }
    }
    return 0;
}
|
mm.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <sys/time.h>
#include <gsl/gsl_cblas.h>
#define N 5000;
/* Naive i-j-k matrix multiply: c(I x J) += a(I x K) * b(K x J),
 * all matrices stored row-major in flat arrays. */
void mm_bruteforce_ijk(double *a, double *b, double *c, int I, int K, int J) {
    for(int row = 0; row < I; row++) {
        for(int col = 0; col < J; col++) {
            /* Accumulate the dot product of row of a and column of b. */
            double sum = c[row * J + col];
            for(int k = 0; k < K; k++)
                sum += a[row * K + k] * b[k * J + col];
            c[row * J + col] = sum;
        }
    }
}
/* Cache-friendlier i-k-j matrix multiply: c(I x J) += a(I x K) * b(K x J).
 * The inner loop walks rows of b and c contiguously. */
void mm_bruteforce_ikj(double *a, double *b, double *c, int I, int K, int J) {
    for(int row = 0; row < I; row++) {
        for(int k = 0; k < K; k++) {
            const double scale = a[row * K + k]; /* reused across the whole row */
            for(int col = 0; col < J; col++)
                c[row * J + col] += scale * b[k * J + col];
        }
    }
}
/* OpenMP i-k-j matrix multiply: rows of c are distributed across threads,
 * so no two threads ever write the same c element. Identical arithmetic
 * to mm_bruteforce_ikj. */
void mm_omp(double *a, double *b, double *c, int I, int K, int J) {
    #pragma omp parallel for
    for(int row = 0; row < I; row++) {
        for(int k = 0; k < K; k++) {
            /* 'register' dropped: it is a no-op hint on modern compilers
             * and was removed from the language in C++17. */
            const double scale = a[row * K + k];
            for(int col = 0; col < J; col++)
                c[row * J + col] += scale * b[k * J + col];
        }
    }
}
/* BLAS matrix multiply: c(p x r) = 1.0 * a(p x q) * b(q x r) + 0.0 * c,
 * row-major storage, no transposes. Leading dimensions equal the row
 * widths: lda = q, ldb = ldc = r. */
void mm_cblas_dgemm(double *a, double *b, double *c, int p, int q, int r) {
    const double alpha = 1.0; /* scale on a*b */
    const double beta = 0.0;  /* c is overwritten, not accumulated */
    cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
                p, r, q, alpha, a, q, b, r, beta, c, r);
}
/* Fill an a x b row-major matrix with 1, 2, ..., a*b. */
void init_arange(double *mat, int a, int b) {
    const int total = a * b;
    for(int idx = 0; idx < total; idx++)
        mat[idx] = (double)(idx + 1);
}
/* Fill an a x b row-major matrix with a single constant value. */
void init_value(double *mat, int a, int b, double value) {
    const int total = a * b;
    for(int idx = 0; idx < total; idx++)
        mat[idx] = value;
}
/* Zero-fill an a x b matrix (convenience wrapper over init_value). */
void init_zero(double *mat, int a, int b) {
    init_value(mat, a, b, 0.0);
}
/* One-fill an a x b matrix (convenience wrapper over init_value). */
void init_one(double *mat, int a, int b) {
    init_value(mat, a, b, 1.0);
}
/* Record the current wall-clock time as the start of a measurement. */
void timer_start(struct timeval *pstv) {
gettimeofday(pstv, NULL);
}
/* Record the current wall-clock time as the end of a measurement. */
void timer_end(struct timeval *petv) {
gettimeofday(petv, NULL);
}
/* Print the elapsed wall-clock time between *pstv and *petv in seconds,
 * with microsecond resolution, to stdout. */
void timer_print(struct timeval *pstv, struct timeval *petv) {
    time_t sec;
    suseconds_t usec;
    sec = petv->tv_sec - pstv->tv_sec;
    usec = petv->tv_usec - pstv->tv_usec;
    if(usec < 0) { /* borrow one second when the microsecond part underflows */
        sec--;
        usec += 1000000;
    }
    /* BUG FIX: the fractional part must be zero-padded to six digits --
     * "%ld.%ld" printed e.g. 1.000005 s as "1.5". Casts added because
     * time_t/suseconds_t are not guaranteed to be long. */
    printf("elapsed time : %ld.%06ld\n", (long)sec, (long)usec);
}
/* Signature shared by all matrix-multiply implementations above. */
typedef void (*fptr_mm)(double *a, double *b, double *c, int I, int K, int J);
/* Time one matrix-multiply implementation: zero the output matrix,
 * run mm(a, b, c, ...) and print the elapsed wall-clock time. */
void check_etime_mm(double *a, double *b, double *c, int I, int K, int J, fptr_mm mm) {
struct timeval stv;
struct timeval etv;
init_zero(c, I, J); /* fresh accumulator so runs are comparable */
timer_start(&stv);
mm(a, b, c, I, K, J);
timer_end(&etv);
timer_print(&stv, &etv);
}
/* Benchmark the four matrix-multiply implementations on N x N matrices.
 * Note: with N = 5000 the brute-force variants perform ~2.5e11
 * multiply-adds each, so a full run takes a long time. */
int main(int argc, char *argv[]) {
    int I, K, J;
    I = K = J = N;
    double *a = (double*)malloc(sizeof(double) * I * K);
    double *b = (double*)malloc(sizeof(double) * K * J);
    double *c = (double*)malloc(sizeof(double) * I * J);
    /* BUG FIX: the malloc results were used unchecked; a failed
     * allocation (600 MB total at N = 5000) crashed on first write. */
    if(a == NULL || b == NULL || c == NULL) {
        fprintf(stderr, "allocation failed\n");
        free(a);
        free(b);
        free(c);
        return 1;
    }
    init_arange(a, I, K);
    init_arange(b, K, J);
    check_etime_mm(a, b, c, I, K, J, mm_bruteforce_ijk);
    check_etime_mm(a, b, c, I, K, J, mm_bruteforce_ikj);
    check_etime_mm(a, b, c, I, K, J, mm_omp);
    check_etime_mm(a, b, c, I, K, J, mm_cblas_dgemm);
    /* BUG FIX: the matrices were leaked (never freed before exit). */
    free(a);
    free(b);
    free(c);
    return 0;
}
|
tmandel1.c | /*
* Sequential Mandelbrot program
*
* This program computes and displays all or part of the Mandelbrot
* set. By default, it examines all points in the complex plane
* that have both real and imaginary parts between -2 and 2.
* Command-line parameters allow zooming in on a specific part of
* this range.
*
* Usage:
* mandel [-i maxiter -c x0 y0 -s size -w windowsize]
* where
 * maxiter denotes the maximum number of iterations at each point -- by default 10000
* x0, y0, and size specify the range to examine (a square
* centered at (x0 + iy0) of size 2*size by 2*size -- by default,
* a square of size 4 by 4 centered at the origin)
* windowsize denotes the size of the image (diplay window) to compute
*
* Input: none, except the optional command-line arguments
* Output: a graphical display as described in Wilkinson & Allen,
* displayed using the X Window system, plus text output to
* standard output showing the above parameters, plus execution
* time in seconds.
*
* Code based on the original code from Web site for Wilkinson and Allen's
* text on parallel programming:
* http://www.cs.uncc.edu/~abw/parallel/par_prog/
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <stdlib.h>
#include <unistd.h>
#include <malloc.h>
#if _DISPLAY_
#include <X11/Xlib.h>
#include <X11/Xutil.h>
#include <X11/Xos.h>
#endif
#include <sys/time.h>
/* Return the current wall-clock time in microseconds since the epoch,
 * as a double (used only for computing elapsed-time differences). */
double getusec_() {
struct timeval time;
gettimeofday(&time, NULL);
return ((double)time.tv_sec * (double)1e6 + (double)time.tv_usec);
}
#define START_COUNT_TIME stamp = getusec_();
#define STOP_COUNT_TIME(_m) stamp = getusec_() - stamp;\
stamp = stamp/1e6;\
printf ("%s: %0.6fs\n",(_m), stamp);
/* Default values for things. */
#define N 2 /* size of problem space (x, y from -N to N) */
#define NPIXELS 800 /* size of display window in pixels */
int row, col; // variables used to traverse the problem space
/* Structure definition for complex numbers */
typedef struct {
double real, imag;
} complex;
#if _DISPLAY_
/* Functions for GUI */
#include "mandelbrot-gui.h" /* has setup(), interact() */
#endif
/* Compute the Mandelbrot iteration count for every pixel of a
 * height x width window mapped onto the complex-plane region that
 * starts at (real_min, imag_min) with per-pixel steps scale_real /
 * scale_imag, iterating at most maxiter times per point.
 * When compiled with _DISPLAY_ each point is drawn directly to the
 * X11 window; otherwise the counts are stored in output[row][col].
 * Meant to be called from inside an OpenMP parallel region: the
 * "omp for" below distributes rows across the threads of the
 * enclosing team. Note that row and col are file-scope globals --
 * row is private per OpenMP rules (it is the omp-for loop variable)
 * and col is explicitly privatized in the pragma. */
void mandelbrot(int height,
int width,
double real_min,
double imag_min,
double scale_real,
double scale_imag,
int maxiter,
#if _DISPLAY_
int setup_return,
Display *display,
Window win,
GC gc,
double scale_color,
double min_color)
#else
int ** output)
#endif
{
/* Calculate points and save/display */
/* dynamic,100: row cost varies strongly across the set, so chunks
 * of 100 rows are handed out on demand for load balance */
#pragma omp for schedule(dynamic,100) private(col)
for (row = 0; row < height; ++row) {
//#pragma omp for schedule(dynamic,100)
for (col = 0; col < width; ++col) {
complex z, c;
z.real = z.imag = 0;
/* Scale display coordinates to actual region */
c.real = real_min + ((double) col * scale_real);
c.imag = imag_min + ((double) (height-1-row) * scale_imag);
/* height-1-row so y axis displays
* with larger values at top
*/
/* Calculate z0, z1, .... until divergence or maximum iterations */
int k = 0;
double lengthsq, temp;
do {
temp = z.real*z.real - z.imag*z.imag + c.real;
z.imag = 2*z.real*z.imag + c.imag;
z.real = temp;
lengthsq = z.real*z.real + z.imag*z.imag;
++k;
} while (lengthsq < (N*N) && k < maxiter);
//printf("%d %d %d\n", row, col, k);
#if _DISPLAY_
/* Scale color and display point */
/* critical: Xlib calls are serialized -- the display connection is
 * shared by all threads */
#pragma omp critical
{
long color = (long) ((k-1) * scale_color) + min_color;
if (setup_return == EXIT_SUCCESS) {
XSetForeground (display, gc, color);
XDrawPoint (display, win, gc, col, row);
}
}
#else
output[row][col]=k;
#endif
}
}
}
/* Parse command-line options, compute the Mandelbrot set over the
 * requested region (in parallel, timing the computation), then either
 * display it via X11 (_DISPLAY_) or optionally write the raw iteration
 * counts to mandel.out (-o). Returns EXIT_SUCCESS / EXIT_FAILURE. */
int main(int argc, char *argv[]) {
    int maxiter = 10000;
    double real_min;
    double real_max;
    double imag_min;
    double imag_max;
    int width = NPIXELS; /* dimensions of display window */
    int height = NPIXELS;
    double size=N, x0 = 0, y0 = 0;
#if _DISPLAY_
    Display *display;
    Window win;
    GC gc;
    int setup_return;
    long min_color = 0, max_color = 0;
    double scale_color;
#else
    FILE *fp = NULL;
    int ** output;
#endif
    double scale_real, scale_imag;
    /* Process command-line arguments */
    for (int i=1; i<argc; i++) {
        if (strcmp(argv[i], "-i")==0) {
            maxiter = atoi(argv[++i]);
        }
        else if (strcmp(argv[i], "-w")==0) {
            width = atoi(argv[++i]);
            height = width; /* image is always square */
        }
        else if (strcmp(argv[i], "-s")==0) {
            size = atof(argv[++i]);
        }
#if !_DISPLAY_
        else if (strcmp(argv[i], "-o")==0) {
            if((fp=fopen("mandel.out", "wb"))==NULL) {
                fprintf(stderr, "Unable to open file\n");
                return EXIT_FAILURE;
            }
        }
#endif
        else if (strcmp(argv[i], "-c")==0) {
            x0 = atof(argv[++i]);
            y0 = atof(argv[++i]);
        }
        else {
#if _DISPLAY_
            fprintf(stderr, "Usage: %s [-i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]);
#else
            fprintf(stderr, "Usage: %s [-o -i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]);
            fprintf(stderr, " -o to write computed image to disk (default no file generated)\n");
#endif
            /* BUG FIX: the text said "default 1000" but maxiter is initialized to 10000 */
            fprintf(stderr, " -i to specify maximum number of iterations at each point (default 10000)\n");
#if _DISPLAY_
            fprintf(stderr, " -w to specify the size of the display window (default 800x800 pixels)\n");
#else
            fprintf(stderr, " -w to specify the size of the image to compute (default 800x800 elements)\n");
#endif
            fprintf(stderr, " -c to specify the center x0+iy0 of the square to compute (default origin)\n");
            fprintf(stderr, " -s to specify the size of the square to compute (default 2, i.e. size 4 by 4)\n");
            return EXIT_FAILURE;
        }
    }
    /* Region examined: a square of half-width `size` centered at (x0, y0) */
    real_min = x0 - size;
    real_max = x0 + size;
    imag_min = y0 - size;
    imag_max = y0 + size;
    /* Produce text output */
    fprintf(stdout, "\n");
    fprintf(stdout, "Mandelbrot program\n");
    fprintf(stdout, "center = (%g, %g), size = %g\n",
            (real_max + real_min)/2, (imag_max + imag_min)/2,
            (real_max - real_min)/2);
    fprintf(stdout, "maximum iterations = %d\n", maxiter);
    fprintf(stdout, "\n");
#if _DISPLAY_
    /* Initialize for graphical display */
    setup_return =
        setup(width, height, &display, &win, &gc, &min_color, &max_color);
    if (setup_return != EXIT_SUCCESS) {
        /* BUG FIX: the old message said "continuing" but we return failure here */
        fprintf(stderr, "Unable to initialize display, exiting\n");
        return EXIT_FAILURE;
    }
#else
    output = malloc(height*sizeof(int *));
    for (int row = 0; row < height; ++row)
        output[row] = malloc(width*sizeof(int));
#endif
    /* Compute factors to scale computational region to window */
    scale_real = (double) (real_max - real_min) / (double) width;
    scale_imag = (double) (imag_max - imag_min) / (double) height;
#if _DISPLAY_
    /* Compute factor for color scaling */
    scale_color = (double) (max_color - min_color) / (double) (maxiter - 1);
#endif
    double stamp;
    #pragma omp parallel
    {
        /* Start timing (one thread only; the others wait at the implicit barrier) */
        #pragma omp single
        {
            START_COUNT_TIME;
        }
#if _DISPLAY_
        mandelbrot(height,width,real_min, imag_min, scale_real, scale_imag, maxiter,
                   setup_return, display, win, gc, scale_color, min_color);
#else
        mandelbrot(height,width,real_min, imag_min, scale_real, scale_imag, maxiter,
                   output);
#endif
        /* End timing */
        #pragma omp single
        {
            STOP_COUNT_TIME("Total execution time");
        }
    }
    /* Be sure all output is written */
#if _DISPLAY_
    if (setup_return == EXIT_SUCCESS) {
        XFlush (display);
    }
#else
    if (fp != NULL)
    {
        for (int row = 0; row < height; ++row)
            if(fwrite(output[row], sizeof(int), width, fp) != (size_t) width)
                fprintf(stderr, "Output file not written correctly\n");
        fclose(fp); /* BUG FIX: the output file was never closed */
    }
    /* BUG FIX: the row buffers were leaked */
    for (int row = 0; row < height; ++row)
        free(output[row]);
    free(output);
#endif
#if _DISPLAY_
    /* Wait for user response, then exit program */
    if (setup_return == EXIT_SUCCESS) {
        interact(display, &win, width, height,
                 real_min, real_max, imag_min, imag_max);
    }
    return EXIT_SUCCESS;
#else
    /* BUG FIX: the non-display path fell off the end of main; make the
     * successful return explicit */
    return EXIT_SUCCESS;
#endif
}
|
compare.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO M M PPPP AAA RRRR EEEEE %
% C O O MM MM P P A A R R E %
% C O O M M M PPPP AAAAA RRRR EEE %
% C O O M M P A A R R E %
% CCCC OOO M M P A A R R EEEEE %
% %
% %
% MagickCore Image Comparison Methods %
% %
% Software Design %
% Cristy %
% December 2003 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/compare.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/transform.h"
#include "MagickCore/utility.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p a r e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompareImages() compares one or more pixel channels of an image to a
% reconstructed image and returns the difference image.
%
% The format of the CompareImages method is:
%
% Image *CompareImages(const Image *image,const Image *reconstruct_image,
% const MetricType metric,double *distortion,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o distortion: the computed distortion between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
static size_t GetImageChannels(const Image *image)
{
register ssize_t
i;
size_t
channels;
channels=0;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) != 0)
channels++;
}
return(channels == 0 ? (size_t) 1 : channels);
}
/* Compare image against reconstruct_image under the given metric and
   return a "difference image": the (extended) original composited with a
   highlight layer that marks differing pixels in the highlight color,
   matching pixels in the lowlight color and masked pixels in the
   masklight color. *distortion receives the metric value. Returns NULL
   (with *distortion set) on failure. */
MagickExport Image *CompareImages(Image *image,const Image *reconstruct_image,
const MetricType metric,double *distortion,ExceptionInfo *exception)
{
CacheView
*highlight_view,
*image_view,
*reconstruct_view;
const char
*artifact;
double
fuzz;
Image
*clone_image,
*difference_image,
*highlight_image;
MagickBooleanType
status;
PixelInfo
highlight,
lowlight,
masklight;
RectangleInfo
geometry;
size_t
columns,
rows;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(reconstruct_image != (const Image *) NULL);
assert(reconstruct_image->signature == MagickCoreSignature);
assert(distortion != (double *) NULL);
*distortion=0.0;
/* NOTE(review): this trace event is logged twice (also a few lines
   above); presumably a harmless copy/paste leftover. */
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
/* Compute the requested metric first; bail out before any image is built. */
status=GetImageDistortion(image,reconstruct_image,metric,distortion,
exception);
if (status == MagickFalse)
return((Image *) NULL);
/* Work on the union of both image extents. */
columns=MagickMax(image->columns,reconstruct_image->columns);
rows=MagickMax(image->rows,reconstruct_image->rows);
SetGeometry(image,&geometry);
geometry.width=columns;
geometry.height=rows;
/* Base layer: a mask-free, extent-adjusted, opaque clone of image. */
clone_image=CloneImage(image,0,0,MagickTrue,exception);
if (clone_image == (Image *) NULL)
return((Image *) NULL);
(void) SetImageMask(clone_image,ReadPixelMask,(Image *) NULL,exception);
difference_image=ExtentImage(clone_image,&geometry,exception);
clone_image=DestroyImage(clone_image);
if (difference_image == (Image *) NULL)
return((Image *) NULL);
(void) SetImageAlphaChannel(difference_image,OpaqueAlphaChannel,exception);
/* Highlight layer: per-pixel verdict colors, composited on top later. */
highlight_image=CloneImage(image,columns,rows,MagickTrue,exception);
if (highlight_image == (Image *) NULL)
{
difference_image=DestroyImage(difference_image);
return((Image *) NULL);
}
status=SetImageStorageClass(highlight_image,DirectClass,exception);
if (status == MagickFalse)
{
difference_image=DestroyImage(difference_image);
highlight_image=DestroyImage(highlight_image);
return((Image *) NULL);
}
(void) SetImageMask(highlight_image,ReadPixelMask,(Image *) NULL,exception);
(void) SetImageAlphaChannel(highlight_image,OpaqueAlphaChannel,exception);
/* Verdict colors: built-in defaults, overridable via image artifacts. */
(void) QueryColorCompliance("#f1001ecc",AllCompliance,&highlight,exception);
artifact=GetImageArtifact(image,"compare:highlight-color");
if (artifact != (const char *) NULL)
(void) QueryColorCompliance(artifact,AllCompliance,&highlight,exception);
(void) QueryColorCompliance("#ffffffcc",AllCompliance,&lowlight,exception);
artifact=GetImageArtifact(image,"compare:lowlight-color");
if (artifact != (const char *) NULL)
(void) QueryColorCompliance(artifact,AllCompliance,&lowlight,exception);
(void) QueryColorCompliance("#888888cc",AllCompliance,&masklight,exception);
artifact=GetImageArtifact(image,"compare:masklight-color");
if (artifact != (const char *) NULL)
(void) QueryColorCompliance(artifact,AllCompliance,&masklight,exception);
/*
Generate difference image.
*/
status=MagickTrue;
fuzz=GetFuzzyColorDistance(image,reconstruct_image);
image_view=AcquireVirtualCacheView(image,exception);
reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
highlight_view=AcquireAuthenticCacheView(highlight_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,highlight_image,rows,1)
#endif
for (y=0; y < (ssize_t) rows; y++)
{
MagickBooleanType
sync;
register const Quantum
*magick_restrict p,
*magick_restrict q;
register Quantum
*magick_restrict r;
register ssize_t
x;
/* On any prior failure just skip remaining rows (cannot break out of
   an OpenMP loop). */
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
r=QueueCacheViewAuthenticPixels(highlight_view,0,y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL) ||
(r == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) columns; x++)
{
double
Da,
Sa;
MagickStatusType
difference;
register ssize_t
i;
/* Pixels excluded by either read mask get the masklight color. */
if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
(GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
{
SetPixelViaPixelInfo(highlight_image,&masklight,r);
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
r+=GetPixelChannels(highlight_image);
continue;
}
difference=MagickFalse;
/* Alpha-premultiply the color channels before comparing. */
Sa=QuantumScale*GetPixelAlpha(image,p);
Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
distance;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(reconstruct_traits == UndefinedPixelTrait) ||
((reconstruct_traits & UpdatePixelTrait) == 0))
continue;
if (channel == AlphaPixelChannel)
distance=(double) p[i]-GetPixelChannel(reconstruct_image,channel,q);
else
distance=Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q);
/* One channel beyond the fuzz threshold marks the pixel different. */
if ((distance*distance) > fuzz)
{
difference=MagickTrue;
break;
}
}
if (difference == MagickFalse)
SetPixelViaPixelInfo(highlight_image,&lowlight,r);
else
SetPixelViaPixelInfo(highlight_image,&highlight,r);
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
r+=GetPixelChannels(highlight_image);
}
sync=SyncCacheViewAuthenticPixels(highlight_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
highlight_view=DestroyCacheView(highlight_view);
reconstruct_view=DestroyCacheView(reconstruct_view);
image_view=DestroyCacheView(image_view);
/* Blend the verdict layer onto the base image. */
(void) CompositeImage(difference_image,highlight_image,image->compose,
MagickTrue,0,0,exception);
highlight_image=DestroyImage(highlight_image);
if (status == MagickFalse)
difference_image=DestroyImage(difference_image);
return(difference_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e D i s t o r t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDistortion() compares one or more pixel channels of an image to a
% reconstructed image and returns the specified distortion metric.
%
% The format of the GetImageDistortion method is:
%
% MagickBooleanType GetImageDistortion(const Image *image,
% const Image *reconstruct_image,const MetricType metric,
% double *distortion,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o distortion: the computed distortion between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  /*
    Absolute error metric: count, per channel, the pixels whose accumulated
    squared channel difference exceeds the fuzz threshold; a pixel that
    differs in any channel is also counted once in
    distortion[CompositePixelChannel].  Returns MagickFalse if pixels could
    not be read from either cache view.
  */
  CacheView
    *image_view,
    *reconstruct_view;
  double
    fuzz;
  MagickBooleanType
    status;
  size_t
    columns,
    rows;
  ssize_t
    y;
  /*
    Compute the absolute difference in pixels between two images.
  */
  status=MagickTrue;
  /* Fuzz threshold scales with the channel count so multi-channel images
     are not penalized relative to grayscale ones. */
  fuzz=(double) MagickMin(GetPixelChannels(image),
    GetPixelChannels(reconstruct_image))*
    GetFuzzyColorDistance(image,reconstruct_image);
  /* Compare over the larger geometry; virtual views supply pixels outside
     the smaller image. */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;
    register ssize_t
      j,
      x;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        distance,
        Sa;
      MagickBooleanType
        difference;
      register ssize_t
        i;
      /* Skip pixels masked out of either image. */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      difference=MagickFalse;
      distance=0.0;
      /* Color channels are weighted by their respective alphas so
         transparency mutes differences; alpha itself compares unweighted. */
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          pixel;
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        if (channel == AlphaPixelChannel)
          pixel=(double) p[i]-GetPixelChannel(reconstruct_image,channel,q);
        else
          pixel=Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q);
        distance+=pixel*pixel;
        /* distance accumulates across channels: once past the fuzz
           threshold, this and every subsequent channel counts as
           different for this pixel. */
        if (distance > fuzz)
          {
            channel_distortion[i]++;
            difference=MagickTrue;
          }
      }
      if (difference != MagickFalse)
        channel_distortion[CompositePixelChannel]++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetAbsoluteDistortion)
#endif
    /* Merge this row's private counts into the shared totals. */
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
static MagickBooleanType GetFuzzDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  /*
    Fuzz metric: mean of the squared, alpha-weighted, QuantumScale-normalized
    channel differences over all unmasked pixels; the composite channel is
    the root of the per-channel mean divided by the channel count.

    Fix: the NULL check for q cast to (Quantum *) instead of
    (const Quantum *), inconsistent with every sibling metric; made
    const-correct (no behavior change).
  */
  CacheView
    *image_view,
    *reconstruct_view;
  double
    area;
  MagickBooleanType
    status;
  register ssize_t
    j;
  size_t
    columns,
    rows;
  ssize_t
    y;
  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1) reduction(+:area)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;
      register ssize_t
        i;
      /* Skip pixels masked out of either image. */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /* Alpha compares unweighted; color channels are alpha-weighted. */
        if (channel == AlphaPixelChannel)
          distance=QuantumScale*(p[i]-GetPixelChannel(reconstruct_image,
            channel,q));
        else
          distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(reconstruct_image,
            channel,q));
        channel_distortion[i]+=distance*distance;
        channel_distortion[CompositePixelChannel]+=distance*distance;
      }
      area++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetFuzzDistortion)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* Normalize by the number of compared pixels (zero-safe reciprocal). */
  area=PerceptibleReciprocal(area);
  for (j=0; j <= MaxPixelChannels; j++)
    distortion[j]*=area;
  distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
  distortion[CompositePixelChannel]=sqrt(distortion[CompositePixelChannel]);
  return(status);
}
static MagickBooleanType GetMeanAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  /*
    Mean absolute error: average, over all unmasked pixels, of the
    alpha-weighted, QuantumScale-normalized absolute channel differences.
    The composite entry is the mean over every channel.
  */
  CacheView
    *image_view,
    *reconstruct_view;
  double
    area;
  MagickBooleanType
    status;
  register ssize_t
    j;
  size_t
    columns,
    rows;
  ssize_t
    y;
  status=MagickTrue;
  /* Compare over the larger geometry; virtual views supply pixels outside
     the smaller image. */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1) reduction(+:area)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;
      register ssize_t
        i;
      /* Skip pixels masked out of either image. */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /* Alpha compares unweighted; color channels are alpha-weighted. */
        if (channel == AlphaPixelChannel)
          distance=QuantumScale*fabs((double) p[i]-
            GetPixelChannel(reconstruct_image,channel,q));
        else
          distance=QuantumScale*fabs(Sa*p[i]-Da*
            GetPixelChannel(reconstruct_image,channel,q));
        channel_distortion[i]+=distance;
        channel_distortion[CompositePixelChannel]+=distance;
      }
      area++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetMeanAbsoluteError)
#endif
    /* Merge this row's private sums into the shared totals. */
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* Normalize by the number of compared pixels (zero-safe reciprocal). */
  area=PerceptibleReciprocal(area);
  for (j=0; j <= MaxPixelChannels; j++)
    distortion[j]*=area;
  distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
  return(status);
}
static MagickBooleanType GetMeanErrorPerPixel(Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  /*
    Mean error per pixel.  Accumulates absolute channel differences into
    distortion[] and, as a side effect, records the mean, normalized-mean,
    and normalized-maximum error in image->error (hence the non-const image
    parameter).  Note area counts channel samples, not pixels.

    Fix: the final divisions used a raw `/area`, producing NaN when every
    pixel is masked (area == 0); now uses PerceptibleReciprocal() like the
    other distortion methods.
  */
  CacheView
    *image_view,
    *reconstruct_view;
  MagickBooleanType
    status;
  double
    area,
    maximum_error,
    mean_error;
  size_t
    columns,
    rows;
  ssize_t
    y;
  status=MagickTrue;
  area=0.0;
  maximum_error=0.0;
  mean_error=0.0;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;
    register ssize_t
      x;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;
      register ssize_t
        i;
      /* Skip pixels masked out of either image. */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /* Alpha compares unweighted; color channels are alpha-weighted. */
        if (channel == AlphaPixelChannel)
          distance=fabs((double) p[i]-
            GetPixelChannel(reconstruct_image,channel,q));
        else
          distance=fabs(Sa*p[i]-Da*
            GetPixelChannel(reconstruct_image,channel,q));
        distortion[i]+=distance;
        distortion[CompositePixelChannel]+=distance;
        mean_error+=distance*distance;
        if (distance > maximum_error)
          maximum_error=distance;
        area++;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* Zero-safe normalization: avoids NaN when no samples were compared. */
  area=PerceptibleReciprocal(area);
  image->error.mean_error_per_pixel=area*distortion[CompositePixelChannel];
  image->error.normalized_mean_error=QuantumScale*QuantumScale*mean_error*area;
  image->error.normalized_maximum_error=QuantumScale*maximum_error;
  return(status);
}
static MagickBooleanType GetMeanSquaredDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  /*
    Mean squared error: average of the squared, alpha-weighted,
    QuantumScale-normalized channel differences over all unmasked pixels;
    the composite entry is the per-channel mean divided by the channel
    count.

    Fix: the final composite division lacked the explicit (double) cast on
    GetImageChannels() used by every sibling metric; added for consistency
    (no behavior change).
  */
  CacheView
    *image_view,
    *reconstruct_view;
  double
    area;
  MagickBooleanType
    status;
  register ssize_t
    j;
  size_t
    columns,
    rows;
  ssize_t
    y;
  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1) reduction(+:area)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;
      register ssize_t
        i;
      /* Skip pixels masked out of either image. */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /* Alpha compares unweighted; color channels are alpha-weighted. */
        if (channel == AlphaPixelChannel)
          distance=QuantumScale*(p[i]-GetPixelChannel(reconstruct_image,
            channel,q));
        else
          distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(reconstruct_image,
            channel,q));
        channel_distortion[i]+=distance*distance;
        channel_distortion[CompositePixelChannel]+=distance*distance;
      }
      area++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetMeanSquaredError)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* Normalize by the number of compared pixels (zero-safe reciprocal). */
  area=PerceptibleReciprocal(area);
  for (j=0; j <= MaxPixelChannels; j++)
    distortion[j]*=area;
  distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
  return(status);
}
static MagickBooleanType GetNormalizedCrossCorrelationDistortion(
  const Image *image,const Image *reconstruct_image,double *distortion,
  ExceptionInfo *exception)
{
#define SimilarityImageTag "Similarity/Image"
  /*
    Normalized cross-correlation: per channel, the covariance of the two
    images (about their means) divided by the product of their standard
    deviations.  Two passes over the pixels: first to count the unmasked
    area, then to accumulate the covariance.
  */
  CacheView
    *image_view,
    *reconstruct_view;
  ChannelStatistics
    *image_statistics,
    *reconstruct_statistics;
  double
    area;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  register ssize_t
    i;
  size_t
    columns,
    rows;
  ssize_t
    y;
  /*
    Normalize to account for variation due to lighting and exposure condition.
  */
  image_statistics=GetImageStatistics(image,exception);
  reconstruct_statistics=GetImageStatistics(reconstruct_image,exception);
  if ((image_statistics == (ChannelStatistics *) NULL) ||
      (reconstruct_statistics == (ChannelStatistics *) NULL))
    {
      /* Release whichever statistics buffer was acquired before failing. */
      if (image_statistics != (ChannelStatistics *) NULL)
        image_statistics=(ChannelStatistics *) RelinquishMagickMemory(
          image_statistics);
      if (reconstruct_statistics != (ChannelStatistics *) NULL)
        reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory(
          reconstruct_statistics);
      return(MagickFalse);
    }
  status=MagickTrue;
  progress=0;
  for (i=0; i <= MaxPixelChannels; i++)
    distortion[i]=0.0;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  /*
    First pass: count the unmasked pixels that participate in the metric.
  */
  for (y=0; y < (ssize_t) rows; y++)
  {
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;
    register ssize_t
      x;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      area++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
  }
  /* Zero-safe reciprocal: area is a multiplier from here on. */
  area=PerceptibleReciprocal(area);
  /*
    Second pass: accumulate the per-channel covariance about the means.
  */
  for (y=0; y < (ssize_t) rows; y++)
  {
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;
    register ssize_t
      x;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /* Alpha compares unweighted; color channels are alpha-weighted. */
        if (channel == AlphaPixelChannel)
          {
            distortion[i]+=area*QuantumScale*(p[i]-
              image_statistics[channel].mean)*(GetPixelChannel(
              reconstruct_image,channel,q)-
              reconstruct_statistics[channel].mean);
          }
        else
          {
            distortion[i]+=area*QuantumScale*(Sa*p[i]-
              image_statistics[channel].mean)*(Da*GetPixelChannel(
              reconstruct_image,channel,q)-
              reconstruct_statistics[channel].mean);
          }
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
        /* NOTE(review): the atomic pragma guards progress, though this loop
           is not parallelized here — presumably kept for symmetry with the
           parallel metrics; confirm before removing. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SimilarityImageTag,progress,rows);
        if (proceed == MagickFalse)
          {
            status=MagickFalse;
            break;
          }
      }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Divide by the standard deviation.
  */
  distortion[CompositePixelChannel]=0.0;
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    double
      gamma;
    PixelChannel channel = GetPixelChannelChannel(image,i);
    gamma=image_statistics[channel].standard_deviation*
      reconstruct_statistics[channel].standard_deviation;
    gamma=PerceptibleReciprocal(gamma);
    distortion[i]=QuantumRange*gamma*distortion[i];
    distortion[CompositePixelChannel]+=distortion[i]*distortion[i];
  }
  /* Composite channel: RMS of the per-channel correlations. */
  distortion[CompositePixelChannel]=sqrt(distortion[CompositePixelChannel]/
    GetImageChannels(image));
  /*
    Free resources.
  */
  reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    reconstruct_statistics);
  image_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    image_statistics);
  return(status);
}
static MagickBooleanType GetPeakAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  /*
    Peak absolute error: per channel, the maximum alpha-weighted,
    QuantumScale-normalized absolute difference over all unmasked pixels;
    the composite entry is the maximum over every channel.
  */
  CacheView
    *image_view,
    *reconstruct_view;
  MagickBooleanType
    status;
  size_t
    columns,
    rows;
  ssize_t
    y;
  status=MagickTrue;
  /* Compare over the larger geometry; virtual views supply pixels outside
     the smaller image. */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;
    register ssize_t
      j,
      x;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;
      register ssize_t
        i;
      /* Skip pixels masked out of either image. */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /* Alpha compares unweighted; color channels are alpha-weighted. */
        if (channel == AlphaPixelChannel)
          distance=QuantumScale*fabs((double) p[i]-
            GetPixelChannel(reconstruct_image,channel,q));
        else
          distance=QuantumScale*fabs(Sa*p[i]-Da*
            GetPixelChannel(reconstruct_image,channel,q));
        if (distance > channel_distortion[i])
          channel_distortion[i]=distance;
        if (distance > channel_distortion[CompositePixelChannel])
          channel_distortion[CompositePixelChannel]=distance;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetPeakAbsoluteError)
#endif
    /* Merge this row's maxima into the shared maxima. */
    for (j=0; j <= MaxPixelChannels; j++)
      if (channel_distortion[j] > distortion[j])
        distortion[j]=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
static inline double MagickLog10(const double x)
{
#define Log10Epsilon (1.0e-11)
if (fabs(x) < Log10Epsilon)
return(log10(Log10Epsilon));
return(log10(fabs(x)));
}
static MagickBooleanType GetPeakSignalToNoiseRatio(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  /*
    Peak signal-to-noise ratio in decibels, derived from the per-channel
    mean squared error; identical images (MSE ~ 0) report infinite PSNR.
  */
  MagickBooleanType
    status;
  ssize_t
    channel;
  status=GetMeanSquaredDistortion(image,reconstruct_image,distortion,exception);
  for (channel=0; channel <= MaxPixelChannels; channel++)
  {
    if (fabs(distortion[channel]) >= MagickEpsilon)
      distortion[channel]=10.0*MagickLog10(1.0)-
        10.0*MagickLog10(distortion[channel]);
    else
      distortion[channel]=INFINITY;
  }
  return(status);
}
static MagickBooleanType GetPerceptualHashDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  /*
    Perceptual-hash metric: per channel, the sum over image moments and
    colorspaces of the squared hash differences (or, with the
    "phash:normalize" artifact set, of the channel-normalized root
    differences).

    Fix: the normalize branch assigned (difference=sqrt(...)) instead of
    accumulating (difference+=sqrt(...)), so every moment/colorspace term
    but the last was discarded; it now accumulates like the default branch.
  */
  ChannelPerceptualHash
    *channel_phash,
    *reconstruct_phash;
  const char
    *artifact;
  MagickBooleanType
    normalize;
  ssize_t
    channel;
  /*
    Compute perceptual hash in the sRGB colorspace.
  */
  channel_phash=GetImagePerceptualHash(image,exception);
  if (channel_phash == (ChannelPerceptualHash *) NULL)
    return(MagickFalse);
  reconstruct_phash=GetImagePerceptualHash(reconstruct_image,exception);
  if (reconstruct_phash == (ChannelPerceptualHash *) NULL)
    {
      channel_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(
        channel_phash);
      return(MagickFalse);
    }
  artifact=GetImageArtifact(image,"phash:normalize");
  normalize=(artifact == (const char *) NULL) ||
    (IsStringTrue(artifact) == MagickFalse) ? MagickFalse : MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static)
#endif
  for (channel=0; channel < MaxPixelChannels; channel++)
  {
    double
      difference;
    register ssize_t
      i;
    difference=0.0;
    for (i=0; i < MaximumNumberOfImageMoments; i++)
    {
      double
        alpha,
        beta;
      register ssize_t
        j;
      for (j=0; j < (ssize_t) channel_phash[0].number_colorspaces; j++)
      {
        alpha=channel_phash[channel].phash[j][i];
        beta=reconstruct_phash[channel].phash[j][i];
        if (normalize == MagickFalse)
          difference+=(beta-alpha)*(beta-alpha);
        else
          difference+=sqrt((beta-alpha)*(beta-alpha)/
            channel_phash[0].number_channels);
      }
    }
    distortion[channel]+=difference;
    /* The composite slot is shared by every thread; serialize the update. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetPerceptualHashDistortion)
#endif
    distortion[CompositePixelChannel]+=difference;
  }
  /*
    Free resources.
  */
  reconstruct_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(
    reconstruct_phash);
  channel_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(channel_phash);
  return(MagickTrue);
}
static MagickBooleanType GetRootMeanSquaredDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  /*
    Root-mean-squared error: the square root of each channel's mean
    squared error, including the composite entry.
  */
  MagickBooleanType
    status;
  ssize_t
    channel;
  status=GetMeanSquaredDistortion(image,reconstruct_image,distortion,exception);
  for (channel=0; channel <= MaxPixelChannels; channel++)
    distortion[channel]=sqrt(distortion[channel]);
  return(status);
}
static MagickBooleanType GetStructuralSimilarityDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
#define SSIMRadius 5.0
#define SSIMSigma 1.5
#define SSIMBlocksize 8
#define SSIMK1 0.01
#define SSIMK2 0.03
#define SSIML 1.0
  /*
    Structural similarity index (SSIM): per pixel, a Gaussian-weighted
    window around the pixel yields local means, variances, and covariance
    for each channel; the standard SSIM formula with stabilizers c1/c2 is
    averaged over all pixels.  Radius, sigma, k1, and k2 are tunable via
    the compare:ssim-* image artifacts.

    Fix: the per-pixel initialization memset x_pixel_sigma_squared twice
    (the second time with sizeof(y_pixel_sigma_squared)) — a copy/paste
    duplicate; the redundant line is removed (y_pixel_sigma_squared is
    cleared by its own memset, so behavior is unchanged).
  */
  CacheView
    *image_view,
    *reconstruct_view;
  char
    geometry[MagickPathExtent];
  const char
    *artifact;
  double
    c1,
    c2,
    radius,
    sigma;
  KernelInfo
    *kernel_info;
  MagickBooleanType
    status;
  register ssize_t
    i;
  size_t
    columns,
    rows;
  ssize_t
    y;
  /*
    Compute structural similarity index @
    https://en.wikipedia.org/wiki/Structural_similarity.
  */
  radius=SSIMRadius;
  artifact=GetImageArtifact(image,"compare:ssim-radius");
  if (artifact != (const char *) NULL)
    radius=StringToDouble(artifact,(char **) NULL);
  sigma=SSIMSigma;
  artifact=GetImageArtifact(image,"compare:ssim-sigma");
  if (artifact != (const char *) NULL)
    sigma=StringToDouble(artifact,(char **) NULL);
  (void) FormatLocaleString(geometry,MagickPathExtent,"gaussian:%.20gx%.20g",
    radius,sigma);
  kernel_info=AcquireKernelInfo(geometry,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /* Stabilizing constants: c1=(k1*L)^2, c2=(k2*L)^2. */
  c1=pow(SSIMK1*SSIML,2.0);
  artifact=GetImageArtifact(image,"compare:ssim-k1");
  if (artifact != (const char *) NULL)
    c1=pow(StringToDouble(artifact,(char **) NULL)*SSIML,2.0);
  c2=pow(SSIMK2*SSIML,2.0);
  artifact=GetImageArtifact(image,"compare:ssim-k2");
  if (artifact != (const char *) NULL)
    c2=pow(StringToDouble(artifact,(char **) NULL)*SSIML,2.0);
  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,reconstruct_image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;
    register ssize_t
      i,
      x;
    if (status == MagickFalse)
      continue;
    /* Fetch the row plus the kernel apron so each window is complete. */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) kernel_info->width/2L),y-
      ((ssize_t) kernel_info->height/2L),columns+kernel_info->width,
      kernel_info->height,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,-((ssize_t) kernel_info->width/
      2L),y-((ssize_t) kernel_info->height/2L),columns+kernel_info->width,
      kernel_info->height,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        x_pixel_mu[MaxPixelChannels+1],
        x_pixel_sigma_squared[MaxPixelChannels+1],
        xy_sigma[MaxPixelChannels+1],
        y_pixel_mu[MaxPixelChannels+1],
        y_pixel_sigma_squared[MaxPixelChannels+1];
      register const Quantum
        *magick_restrict reference,
        *magick_restrict target;
      register MagickRealType
        *k;
      ssize_t
        v;
      (void) memset(x_pixel_mu,0,sizeof(x_pixel_mu));
      (void) memset(x_pixel_sigma_squared,0,sizeof(x_pixel_sigma_squared));
      (void) memset(xy_sigma,0,sizeof(xy_sigma));
      (void) memset(y_pixel_mu,0,sizeof(y_pixel_mu));
      (void) memset(y_pixel_sigma_squared,0,sizeof(y_pixel_sigma_squared));
      k=kernel_info->values;
      reference=p;
      target=q;
      /*
        Accumulate Gaussian-weighted first and second moments over the
        kernel window.
      */
      for (v=0; v < (ssize_t) kernel_info->height; v++)
      {
        register ssize_t
          u;
        for (u=0; u < (ssize_t) kernel_info->width; u++)
        {
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            double
              x_pixel,
              y_pixel;
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            PixelTrait reconstruct_traits = GetPixelChannelTraits(
              reconstruct_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (reconstruct_traits == UndefinedPixelTrait) ||
                ((reconstruct_traits & UpdatePixelTrait) == 0))
              continue;
            x_pixel=QuantumScale*reference[i];
            x_pixel_mu[i]+=(*k)*x_pixel;
            x_pixel_sigma_squared[i]+=(*k)*x_pixel*x_pixel;
            y_pixel=QuantumScale*
              GetPixelChannel(reconstruct_image,channel,target);
            y_pixel_mu[i]+=(*k)*y_pixel;
            y_pixel_sigma_squared[i]+=(*k)*y_pixel*y_pixel;
            xy_sigma[i]+=(*k)*x_pixel*y_pixel;
          }
          k++;
          reference+=GetPixelChannels(image);
          target+=GetPixelChannels(reconstruct_image);
        }
        reference+=GetPixelChannels(image)*columns;
        target+=GetPixelChannels(reconstruct_image)*columns;
      }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          ssim,
          x_pixel_mu_squared,
          x_pixel_sigmas_squared,
          xy_mu,
          xy_sigmas,
          y_pixel_mu_squared,
          y_pixel_sigmas_squared;
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(
          reconstruct_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /* Convert raw moments to (co)variances about the window means. */
        x_pixel_mu_squared=x_pixel_mu[i]*x_pixel_mu[i];
        y_pixel_mu_squared=y_pixel_mu[i]*y_pixel_mu[i];
        xy_mu=x_pixel_mu[i]*y_pixel_mu[i];
        xy_sigmas=xy_sigma[i]-xy_mu;
        x_pixel_sigmas_squared=x_pixel_sigma_squared[i]-x_pixel_mu_squared;
        y_pixel_sigmas_squared=y_pixel_sigma_squared[i]-y_pixel_mu_squared;
        ssim=((2.0*xy_mu+c1)*(2.0*xy_sigmas+c2))/
          ((x_pixel_mu_squared+y_pixel_mu_squared+c1)*
           (x_pixel_sigmas_squared+y_pixel_sigmas_squared+c2));
        channel_distortion[i]+=ssim;
        channel_distortion[CompositePixelChannel]+=ssim;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetStructuralSimilarityDistortion)
#endif
    for (i=0; i <= MaxPixelChannels; i++)
      distortion[i]+=channel_distortion[i];
  }
  image_view=DestroyCacheView(image_view);
  reconstruct_view=DestroyCacheView(reconstruct_view);
  /* Average the per-pixel SSIM over the full comparison area. */
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel channel = GetPixelChannelChannel(image,i);
    PixelTrait traits = GetPixelChannelTraits(image,channel);
    if ((traits == UndefinedPixelTrait) || ((traits & UpdatePixelTrait) == 0))
      continue;
    distortion[i]/=((double) columns*rows);
  }
  distortion[CompositePixelChannel]/=((double) columns*rows);
  distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(status);
}
static MagickBooleanType GetStructuralDisimilarityDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  /*
    Structural dissimilarity (DSSIM): maps each channel's SSIM score s to
    (1-s)/2, so identical images score 0 and anti-correlated images 1.
  */
  MagickBooleanType
    status;
  ssize_t
    channel;
  status=GetStructuralSimilarityDistortion(image,reconstruct_image,
    distortion,exception);
  for (channel=0; channel <= MaxPixelChannels; channel++)
    distortion[channel]=0.5*(1.0-distortion[channel]);
  return(status);
}
MagickExport MagickBooleanType GetImageDistortion(Image *image,
  const Image *reconstruct_image,const MetricType metric,double *distortion,
  ExceptionInfo *exception)
{
  double
    *channel_distortion;

  MagickBooleanType
    status;

  size_t
    length;

  /*
    Validate arguments; the caller receives the composite distortion in
    *distortion.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  assert(distortion != (double *) NULL);
  *distortion=0.0;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Allocate a zero-filled accumulator: one slot per pixel channel plus the
    composite slot.
  */
  length=MaxPixelChannels+1;
  channel_distortion=(double *) AcquireQuantumMemory(length,
    sizeof(*channel_distortion));
  if (channel_distortion == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(channel_distortion,0,length*sizeof(*channel_distortion));
  /*
    Dispatch on the requested metric; an unrecognized metric falls back to
    normalized cross correlation.
  */
  switch (metric)
  {
    case AbsoluteErrorMetric:
      status=GetAbsoluteDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    case FuzzErrorMetric:
      status=GetFuzzDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    case MeanAbsoluteErrorMetric:
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    case MeanErrorPerPixelErrorMetric:
      status=GetMeanErrorPerPixel(image,reconstruct_image,channel_distortion,
        exception);
      break;
    case MeanSquaredErrorMetric:
      status=GetMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    case PeakAbsoluteErrorMetric:
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    case PeakSignalToNoiseRatioErrorMetric:
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,
        channel_distortion,exception);
      break;
    case PerceptualHashErrorMetric:
      status=GetPerceptualHashDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    case RootMeanSquaredErrorMetric:
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    case StructuralSimilarityErrorMetric:
      status=GetStructuralSimilarityDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    case StructuralDissimilarityErrorMetric:
      status=GetStructuralDisimilarityDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    case NormalizedCrossCorrelationErrorMetric:
    default:
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
  }
  *distortion=channel_distortion[CompositePixelChannel];
  channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
  (void) FormatImageProperty(image,"distortion","%.*g",GetMagickPrecision(),
    *distortion);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e D i s t o r t i o n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDistortions() compares the pixel channels of an image to a
% reconstructed image and returns the specified distortion metric for each
% channel.
%
% The format of the GetImageDistortions method is:
%
% double *GetImageDistortions(const Image *image,
% const Image *reconstruct_image,const MetricType metric,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport double *GetImageDistortions(Image *image,
  const Image *reconstruct_image,const MetricType metric,
  ExceptionInfo *exception)
{
  double
    *channel_distortion;

  MagickBooleanType
    status;

  size_t
    length;

  /*
    Validate the input images.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Get image distortion: one slot per pixel channel plus the composite
    slot; ownership of the buffer passes to the caller on success.
  */
  length=MaxPixelChannels+1UL;
  channel_distortion=(double *) AcquireQuantumMemory(length,
    sizeof(*channel_distortion));
  if (channel_distortion == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(channel_distortion,0,length*
    sizeof(*channel_distortion));
  status=MagickTrue;
  switch (metric)
  {
    case AbsoluteErrorMetric:
    {
      status=GetAbsoluteDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case FuzzErrorMetric:
    {
      status=GetFuzzDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanAbsoluteErrorMetric:
    {
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case MeanErrorPerPixelErrorMetric:
    {
      status=GetMeanErrorPerPixel(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanSquaredErrorMetric:
    {
      status=GetMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case NormalizedCrossCorrelationErrorMetric:
    default:
    {
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakAbsoluteErrorMetric:
    {
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakSignalToNoiseRatioErrorMetric:
    {
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PerceptualHashErrorMetric:
    {
      /*
        Bug fix: this case previously called GetRootMeanSquaredDistortion(),
        so the perceptual-hash metric silently reported RMSE values.  Use
        the perceptual-hash distortion, matching GetImageDistortion().
      */
      status=GetPerceptualHashDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case RootMeanSquaredErrorMetric:
    {
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case StructuralSimilarityErrorMetric:
    {
      status=GetStructuralSimilarityDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case StructuralDissimilarityErrorMetric:
    {
      status=GetStructuralDisimilarityDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
  }
  if (status == MagickFalse)
    {
      channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
      return((double *) NULL);
    }
  return(channel_distortion);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e s E q u a l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImagesEqual() compares the pixels of two images and returns immediately
% if any pixel is not identical.
%
% The format of the IsImagesEqual method is:
%
% MagickBooleanType IsImagesEqual(const Image *image,
% const Image *reconstruct_image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsImagesEqual(const Image *image,
  const Image *reconstruct_image,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Walk both images pixel-by-pixel and bail out of every loop level as
    soon as one channel differs by at least MagickEpsilon; the final row
    index tells us whether a mismatch was found.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      break;
    for (x=0; x < (ssize_t) columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          difference;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        /*
          Skip channels that are undefined in either image or not marked
          for update in the reconstruction image.
        */
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        difference=fabs(p[i]-(double) GetPixelChannel(reconstruct_image,
          channel,q));
        if (difference >= MagickEpsilon)
          break;
      }
      if (i < (ssize_t) GetPixelChannels(image))
        break;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
    if (x < (ssize_t) columns)
      break;
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  return(y < (ssize_t) rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C o l o r M e t r i c %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColorMetric() measures the difference between colors at each pixel
% location of two images. A value other than 0 means the colors match
% exactly. Otherwise an error measure is computed by summing over all
% pixels in an image the distance squared in RGB space between each image
% pixel and its corresponding pixel in the reconstruct image. The error
% measure is assigned to these image members:
%
% o mean_error_per_pixel: The mean error for any single pixel in
% the image.
%
% o normalized_mean_error: The normalized mean quantization error for
% any single pixel in the image. This distance measure is normalized to
% a range between 0 and 1. It is independent of the range of red, green,
% and blue values in the image.
%
% o normalized_maximum_error: The normalized maximum quantization
% error for any single pixel in the image. This distance measure is
% normalized to a range between 0 and 1. It is independent of the range
% of red, green, and blue values in your image.
%
% A small normalized mean square error, accessed as
% image->normalized_mean_error, suggests the images are very similar in
% spatial layout and color.
%
% The format of the SetImageColorMetric method is:
%
% MagickBooleanType SetImageColorMetric(Image *image,
% const Image *reconstruct_image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageColorMetric(Image *image,
  const Image *reconstruct_image,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    area,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Accumulate per-channel absolute differences over every comparable
    sample and record the mean/maximum error members of image->error.
    Returns MagickTrue when the images match exactly.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  area=0.0;
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      break;
    for (x=0; x < (ssize_t) columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        distance=fabs(p[i]-(double) GetPixelChannel(reconstruct_image,
          channel,q));
        /*
          Differences below MagickEpsilon count as identical but still
          contribute to the sample area.
        */
        if (distance >= MagickEpsilon)
          {
            mean_error_per_pixel+=distance;
            mean_error+=distance*distance;
            if (distance > maximum_error)
              maximum_error=distance;
          }
        area++;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Guard against a zero area (no comparable samples, e.g. all channels
    undefined): 0/0 would store NaN in the error members and poison the
    status comparison below.
  */
  if (area == 0.0)
    area=1.0;
  image->error.mean_error_per_pixel=(double) (mean_error_per_pixel/area);
  image->error.normalized_mean_error=(double) (QuantumScale*QuantumScale*
    mean_error/area);
  image->error.normalized_maximum_error=(double) (QuantumScale*maximum_error);
  status=image->error.mean_error_per_pixel == 0.0 ? MagickTrue : MagickFalse;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S i m i l a r i t y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SimilarityImage() compares the reference image of the image and returns the
% best match offset. In addition, it returns a similarity image such that an
% exact match location is completely white and if none of the pixels match,
% black, otherwise some gray level in-between.
%
% The format of the SimilarityImage method is:
%
% Image *SimilarityImage(const Image *image,const Image *reference,
% const MetricType metric,const double similarity_threshold,
% RectangleInfo *offset,double *similarity,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reference: find an area of the image that closely resembles this image.
%
% o metric: the metric.
%
% o similarity_threshold: minimum distortion for (sub)image match.
%
% o offset: the best match offset of the reference image within the image.
%
% o similarity: the computed similarity between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
static double GetSimilarityMetric(const Image *image,const Image *reference,
  const MetricType metric,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
  double
    distortion;

  Image
    *crop_image;

  MagickBooleanType
    status;

  RectangleInfo
    geometry;

  /*
    Crop a reference-sized window at (x_offset,y_offset) out of the image
    and measure its distortion against the reference; failures yield 0.0.
  */
  SetGeometry(reference,&geometry);
  geometry.x=x_offset;
  geometry.y=y_offset;
  crop_image=CropImage(image,&geometry,exception);
  if (crop_image == (Image *) NULL)
    return(0.0);
  distortion=0.0;
  status=GetImageDistortion(crop_image,reference,metric,&distortion,exception);
  crop_image=DestroyImage(crop_image);
  return(status != MagickFalse ? distortion : 0.0);
}
MagickExport Image *SimilarityImage(const Image *image,const Image *reference,
  const MetricType metric,const double similarity_threshold,
  RectangleInfo *offset,double *similarity_metric,ExceptionInfo *exception)
{
#define SimilarityImageTag "Similarity/Image"

  CacheView
    *similarity_view;

  Image
    *similarity_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Validate arguments; *similarity_metric starts at the worst possible
    score and is lowered as better matches are found.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(offset != (RectangleInfo *) NULL);
  SetGeometry(reference,offset);
  *similarity_metric=MagickMaximumValue;
  /*
    The similarity image has one pixel for every possible placement of the
    reference image within the image.
  */
  similarity_image=CloneImage(image,image->columns-reference->columns+1,
    image->rows-reference->rows+1,MagickTrue,exception);
  if (similarity_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageStorageClass(similarity_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      similarity_image=DestroyImage(similarity_image);
      return((Image *) NULL);
    }
  (void) SetImageAlphaChannel(similarity_image,DeactivateAlphaChannel,
    exception);
  /*
    Measure similarity of reference image against image.
  */
  status=MagickTrue;
  progress=0;
  similarity_view=AcquireAuthenticCacheView(similarity_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    shared(progress,status,similarity_metric) \
    magick_number_threads(image,image,image->rows-reference->rows+1,1)
#endif
  for (y=0; y < (ssize_t) (image->rows-reference->rows+1); y++)
  {
    double
      similarity;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp flush(similarity_metric)
#endif
    if (*similarity_metric <= similarity_threshold)
      continue;
    q=GetCacheViewAuthenticPixels(similarity_view,0,y,similarity_image->columns,
      1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) (image->columns-reference->columns+1); x++)
    {
      register ssize_t
        i;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp flush(similarity_metric)
#endif
      if (*similarity_metric <= similarity_threshold)
        break;
      similarity=GetSimilarityMetric(image,reference,metric,x,y,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp critical (MagickCore_SimilarityImage)
#endif
      {
        /*
          Bug fix: the braces are required here.  Without them the critical
          section covered only the first if-statement, so concurrent threads
          raced on the shared best-match state (offset and the similarity
          metric) and could record an inconsistent offset/score pair.
        */
        if ((metric == NormalizedCrossCorrelationErrorMetric) ||
            (metric == UndefinedErrorMetric))
          similarity=1.0-similarity;
        if (similarity < *similarity_metric)
          {
            offset->x=x;
            offset->y=y;
            *similarity_metric=similarity;
          }
      }
      if (metric == PerceptualHashErrorMetric)
        similarity=MagickMin(0.01*similarity,1.0);
      /*
        Render the score into the similarity image: white is an exact
        match, black no match at all.
      */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait similarity_traits=GetPixelChannelTraits(similarity_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (similarity_traits == UndefinedPixelTrait) ||
            ((similarity_traits & UpdatePixelTrait) == 0))
          continue;
        SetPixelChannel(similarity_image,channel,ClampToQuantum(QuantumRange-
          QuantumRange*similarity),q);
      }
      q+=GetPixelChannels(similarity_image);
    }
    if (SyncCacheViewAuthenticPixels(similarity_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SimilarityImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  similarity_view=DestroyCacheView(similarity_view);
  if (status == MagickFalse)
    similarity_image=DestroyImage(similarity_image);
  return(similarity_image);
}
|
linalg.h | /**
* Copyright (c) 2020, Massachusetts Institute of Technology,
* Cambridge, MA 02139
* All Rights Reserved
* Authors: Jingnan Shi, et al. (see THANKS for the full author list)
* See LICENSE for the license information
*/
#pragma once
#include <iostream>
#include <Eigen/Core>
#include <Eigen/SparseCore>
#include <Eigen/Eigenvalues>
namespace teaser {
/**
 * Return the hat map of the provided vector (a skew symmetric matrix).
 * @param u 3-by-1 vector
 * @return 3-by-3 skew symmetric matrix x such that x * v == u.cross(v)
 */
// NOTE: `inline` is required — this is a non-template function defined in a
// header, so without it every translation unit that includes this header
// emits its own definition and linking fails with multiple-definition errors.
inline Eigen::Matrix<double, 3, 3> hatmap(const Eigen::Matrix<double, 3, 1>& u) {
  Eigen::Matrix<double, 3, 3> x;
  // clang-format off
  x << 0, -u(2), u(1),
       u(2), 0, -u(0),
       -u(1), u(0), 0;
  // clang-format on
  return x;
}
/**
 * Vector-vector Kronecker product with a fixed-size output.
 * output[i*M + j] = v1[i] * v2[j].
 * @tparam NumT numerical scalar type
 * @tparam N size of the first vector
 * @tparam M size of the second vector
 * @param v1 [in] first vector
 * @param v2 [in] second vector
 * @param output [out] N*M-by-1 result vector
 */
template <typename NumT, int N, int M>
void vectorKron(const Eigen::Matrix<NumT, N, 1>& v1, const Eigen::Matrix<NumT, M, 1>& v2,
                Eigen::Matrix<NumT, N * M, 1>* output) {
#pragma omp parallel for shared(v1, v2, output) default(none)
  for (int row = 0; row < N; ++row) {
    // Each row of v1 scales a full copy of v2 into its output segment.
    const NumT scale = v1[row];
    for (int col = 0; col < M; ++col) {
      (*output)[row * M + col] = scale * v2[col];
    }
  }
}
/**
 * Vector-vector Kronecker product function with dynamic-size output.
 * @tparam NumT numerical type for Eigen matrices (double, float, etc.)
 * @tparam N size of the first vector
 * @tparam M size of the second vector
 * @param v1 [in] first vector
 * @param v2 [in] second vector
 * @return result of the Kronecker product (length v1.rows() * v2.rows())
 */
template <typename NumT, int N, int M>
Eigen::Matrix<NumT, Eigen::Dynamic, 1> vectorKron(const Eigen::Matrix<NumT, N, 1>& v1,
                                                  const Eigen::Matrix<NumT, M, 1>& v2)
{
  // Bug fix: the output buffer was hard-coded to double, which breaks
  // (fails to convert) for non-double NumT instantiations; use NumT.
  Eigen::Matrix<NumT, Eigen::Dynamic, 1> output(v1.rows() * v2.rows(), 1);
#pragma omp parallel for shared(v1, v2, output) default(none)
  for (int i = 0; i < v1.rows(); ++i) {
    for (int j = 0; j < v2.rows(); ++j) {
      output[i * v2.rows() + j] = v1[i] * v2[j];
    }
  }
  return output;
}
/**
 * Find the nearest (in Frobenius norm) symmetric positive semi-definite
 * matrix to A.
 *
 * See: https://www.sciencedirect.com/science/article/pii/0024379588902236
 *
 * @tparam NumT numerical type for Eigen matrices (double, float, etc.)
 * @param A [in] input square matrix
 * @param nearestPSD [out] output nearest positive semi-definite matrix
 */
template <typename NumT>
void getNearestPSD(const Eigen::Matrix<NumT, Eigen::Dynamic, Eigen::Dynamic>& A,
                   Eigen::Matrix<NumT, Eigen::Dynamic, Eigen::Dynamic>* nearestPSD) {
  // Bug fix: the intermediates were hard-coded to double (MatrixXd), which
  // fails to compile / silently converts for non-double NumT; use NumT.
  using MatrixX = Eigen::Matrix<NumT, Eigen::Dynamic, Eigen::Dynamic>;
  using VectorX = Eigen::Matrix<NumT, Eigen::Dynamic, 1>;
  assert(A.rows() == A.cols());
  nearestPSD->resize(A.rows(), A.cols());
  // symmetrize A into B
  MatrixX B = (A + A.transpose()) / 2;
  // eigendecomposition of B (self-adjoint, so eigenvalues are real)
  Eigen::SelfAdjointEigenSolver<MatrixX> eig_B(B);
  VectorX De = eig_B.eigenvalues();
  // clamp negative eigenvalues to zero and rebuild the matrix
  MatrixX De_positive = (De.array() < 0).select(0, De).asDiagonal();
  MatrixX Ve = eig_B.eigenvectors();
  *nearestPSD = Ve * De_positive * Ve.transpose();
}
} // namespace teaser |
naive_math_impl.h | // Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
// Transpose an M x K row-major matrix (leading dimension ldin) into "c4"
// layout: rows are processed in groups of 4 and interleaved so that each
// column of a 4-row group is stored contiguously.  Rows beyond M are zero
// padded; when pack_k is true the K dimension is also zero padded up to a
// multiple of 4.  `output` must hold ceil(M/4)*4 * k_round elements.
template <typename type>
static void basic_trans_mat_to_c4(const type* input,
                                  type* output,
                                  const int ldin,
                                  const int M,
                                  const int K,
                                  bool pack_k) {
  const int m_round = (M + 3) / 4 * 4;
  int k_round = (K + 3) / 4 * 4;
  if (!pack_k) {
    k_round = K;
  }
  const int m_loop = m_round / 4;
  // Zero row used as the source for rows past the end of the input
  // (value-initialized, replacing the previous new[] + memset pair).
  type* zero_buf = new type[K]();
  for (int i = 0; i < m_loop; ++i) {
    const type* in0 = input + i * 4 * ldin;
    const type* in1 = in0 + ldin;
    const type* in2 = in1 + ldin;
    const type* in3 = in2 + ldin;
    if (4 * (i + 1) - M > 0) {
      // Redirect out-of-range rows to the zero row.  The fall-through is
      // intentional: overhang of 3 means rows 1..3 are padding, etc.
      switch (4 * (i + 1) - M) {
        case 3:
          in1 = zero_buf;  // fall through
        case 2:
          in2 = zero_buf;  // fall through
        case 1:
          in3 = zero_buf;
        default:
          break;
      }
    }
    // Interleave one element from each of the four rows per output step.
    for (int j = 0; j < K; ++j) {
      *output++ = *in0++;
      *output++ = *in1++;
      *output++ = *in2++;
      *output++ = *in3++;
    }
    // Pad the packed K dimension with zeros when requested.
    for (int j = K; j < k_round; ++j) {
      *output++ = static_cast<type>(0);
      *output++ = static_cast<type>(0);
      *output++ = static_cast<type>(0);
      *output++ = static_cast<type>(0);
    }
  }
  delete[] zero_buf;
}
// Reference GEMM that writes its result in c4 (4-row interleaved) layout:
// computes alpha * op(a) * op(b) + bias into a dense temporary (zeroed, so
// the beta term contributes nothing), optionally applies ReLU, then
// re-packs the temporary into `c` with basic_trans_mat_to_c4.
template <typename type, typename type2>
static void basic_gemm_c4(bool trans_a,
                          bool trans_b,
                          int m,
                          int n,
                          int k,
                          type2 alpha,
                          const type* a,
                          int lda,
                          const type* b,
                          int ldb,
                          type2 beta,
                          type2* c,
                          int ldc,
                          const type2* bias,
                          bool flag_bias = false,
                          bool flag_relu = false) {
  type2* tmp_c = reinterpret_cast<type2*>(malloc(m * ldc * sizeof(type2)));
  memset(tmp_c, 0, m * ldc * sizeof(type2));
#pragma omp parallel for
  for (int row = 0; row < m; ++row) {
    const type2 bias_val = flag_bias ? bias[row] : static_cast<type2>(0);
    for (int col = 0; col < n; ++col) {
      type2 acc = static_cast<type2>(0);
      for (int d = 0; d < k; ++d) {
        const type av = trans_a ? a[d * lda + row] : a[row * lda + d];
        const type bv = trans_b ? b[col * ldb + d] : b[d * ldb + col];
        acc += av * bv;
      }
      const type2 value =
          alpha * acc + beta * tmp_c[row * ldc + col] + bias_val;
      tmp_c[row * ldc + col] =
          (flag_relu && !(value > (type2)0)) ? (type2)0 : value;
    }
  }
  //! trans c to c4
  basic_trans_mat_to_c4(tmp_c, c, ldc, m, n, false);
  free(tmp_c);
}
// Reference GEMM: c = alpha * op(a) * op(b) + beta * c (+ optional per-row
// bias), with an optional ReLU applied to each output element.  op() is a
// transpose when the corresponding trans_ flag is set.
template <typename type, typename type2>
static void basic_gemm(bool trans_a,
                       bool trans_b,
                       int m,
                       int n,
                       int k,
                       type2 alpha,
                       const type* a,
                       int lda,
                       const type* b,
                       int ldb,
                       type2 beta,
                       type2* c,
                       int ldc,
                       const type2* bias,
                       bool flag_bias = false,
                       bool flag_relu = false) {
#pragma omp parallel for
  for (int row = 0; row < m; ++row) {
    const type2 bias_val = flag_bias ? bias[row] : static_cast<type2>(0);
    for (int col = 0; col < n; ++col) {
      // Dot product of row `row` of op(a) with column `col` of op(b).
      type2 acc = static_cast<type2>(0);
      for (int d = 0; d < k; ++d) {
        const type av = trans_a ? a[d * lda + row] : a[row * lda + d];
        const type bv = trans_b ? b[col * ldb + d] : b[d * ldb + col];
        acc += av * bv;
      }
      const type2 value = alpha * acc + beta * c[row * ldc + col] + bias_val;
      c[row * ldc + col] =
          (flag_relu && !(value > (type2)0)) ? (type2)0 : value;
    }
  }
}
// Reference GEMV: c[i] = act(alpha * dot(op(a)[i], b) + beta * c[i] + bias[i]).
//   m        : rows of op(a) / length of c
//   k        : cols of op(a) / length of b
//   a        : m x k matrix (stored k x m when trans_a is true)
//   bias     : per-row bias, read only when flag_bias is true
//   flag_act : 0 = none, 1 = relu, 2 = relu6 (clipped at `six`),
//              4 = leaky relu with slope `leakey_relu_alpha`; any other
//              positive value leaves c[i] untouched.
template <typename type, typename type2>
static void basic_gemv(int m,
                       int k,
                       const type* a,
                       const type* b,
                       const type2* bias,
                       type2* c,
                       type2 alpha,
                       type2 beta,
                       bool trans_a = false,
                       bool flag_bias = false,
                       int flag_act = 0,  // was `= false`: bool literal for an int parameter
                       float six = 6.f,
                       float leakey_relu_alpha = 1.f) {
#pragma omp parallel for
  for (int i = 0; i < m; ++i) {
    auto bias_data = static_cast<type2>(0);
    if (flag_bias) {
      bias_data = bias[i];
    }
    auto sum = static_cast<type2>(0);
    for (int j = 0; j < k; ++j) {
      type av;
      if (trans_a) {
        av = a[j * m + i];
      } else {
        av = a[i * k + j];
      }
      sum += av * b[j];
    }
    type2 tmp = alpha * sum + beta * c[i] + bias_data;
    if (flag_act > 0) {
      if (flag_act == 1) {  // relu
        c[i] = tmp > (type2)0 ? tmp : (type2)0;
      } else if (flag_act == 2) {  // relu6
        c[i] = tmp > (type2)0 ? tmp : (type2)0;
        c[i] = c[i] < six ? c[i] : six;
      } else if (flag_act == 4) {  // leaky relu
        c[i] = tmp < (type2)0 ? (type2)(tmp * leakey_relu_alpha) : tmp;
      }
    } else {
      c[i] = tmp;
    }
  }
}
/**
* \brief basic direct convolution function
*/
//! for float, Dtype1 and Dtype2 are float
//! for int8, Dtype1 is char and Dtype2 is int
// Naive reference implementation of a grouped, dilated 2-D convolution with
// optional per-output-channel bias and activation.
//   act_type: 0 = none, 1 = relu, 2 = relu6 (clipped at `six`),
//             4 = leaky relu with slope `scale`; other positive values only
//             print an error.
// Input is NCHW; weights are [group][out_c_group][in_c_group][kh][kw].
template <typename Dtype1, typename Dtype2>
static void conv_basic(const Dtype1* din,
                       Dtype2* dout,
                       int num,
                       int chout,
                       int hout,
                       int wout,
                       int chin,
                       int hin,
                       int win,
                       const Dtype1* weights,
                       const Dtype2* bias,
                       int group,
                       int kernel_w,
                       int kernel_h,
                       int stride_w,
                       int stride_h,
                       int dila_w,
                       int dila_h,
                       int pad_w,
                       int pad_h,
                       bool flag_bias,
                       int act_type,
                       float six = 6.f,
                       float scale = 1.f) {
  // (removed: an unused `Dtype2 beta = 0;` local that was never read)
  auto src_data = din;
  auto dst_data_ref = dout;
  auto weights_data = weights;
  auto with_bias = flag_bias;
  auto bias_data = bias;
  int in_num = num;
  int out_channels = chout;
  int out_h = hout;
  int out_w = wout;
  int in_channel = chin;
  int in_h = hin;
  int in_w = win;
  int out_c_group = out_channels / group;
  int in_c_group = in_channel / group;
  for (int n = 0; n < in_num; ++n) {
#pragma omp parallel for collapse(4)
    for (int g = 0; g < group; ++g) {
      for (int oc = 0; oc < out_c_group; ++oc) {
        for (int oh = 0; oh < out_h; ++oh) {
          for (int ow = 0; ow < out_w; ++ow) {
            int out_idx = n * group * out_c_group * out_h * out_w +
                          g * out_c_group * out_h * out_w + oc * out_h * out_w +
                          oh * out_w + ow;
            Dtype2 bias_d = with_bias ? (bias_data[g * out_c_group + oc]) : 0;
            dst_data_ref[out_idx] = bias_d;
            // Accumulate over the receptive field; out-of-image taps
            // (padding) are simply skipped.
            for (int ic = 0; ic < in_c_group; ++ic) {
              for (int kh = 0; kh < kernel_h; ++kh) {
                for (int kw = 0; kw < kernel_w; ++kw) {
                  int iw = ow * stride_w - pad_w + kw * (dila_w);
                  int ih = oh * stride_h - pad_h + kh * (dila_h);
                  if (iw < 0 || iw >= in_w) continue;
                  if (ih < 0 || ih >= in_h) continue;
                  int iidx = n * in_channel * in_h * in_w +
                             g * in_c_group * in_h * in_w + ic * in_h * in_w +
                             ih * in_w + iw;
                  int widx =
                      g * out_c_group * in_c_group * kernel_h * kernel_w +
                      oc * in_c_group * kernel_h * kernel_w +
                      ic * kernel_h * kernel_w + kh * kernel_w + kw;
                  dst_data_ref[out_idx] += src_data[iidx] * weights_data[widx];
                }
              }
            }
            if (act_type > 0) {
              // 1-relu 2-relu6 4-leakyrelu
              if (act_type == 1) {
                dst_data_ref[out_idx] = dst_data_ref[out_idx] > (Dtype2)0
                                            ? dst_data_ref[out_idx]
                                            : (Dtype2)0;
              } else if (act_type == 2) {
                dst_data_ref[out_idx] = dst_data_ref[out_idx] > (Dtype2)0
                                            ? dst_data_ref[out_idx]
                                            : (Dtype2)0;
                dst_data_ref[out_idx] = dst_data_ref[out_idx] < (Dtype2)six
                                            ? dst_data_ref[out_idx]
                                            : (Dtype2)six;
              } else if (act_type == 4) {
                dst_data_ref[out_idx] =
                    dst_data_ref[out_idx] > (Dtype2)0
                        ? dst_data_ref[out_idx]
                        : (Dtype2)(dst_data_ref[out_idx] * scale);
              } else {
                printf("this act type: %d does not support \n", act_type);
              }
            }
          }
        }
      }
    }
  }
}
// Add a per-channel bias to a [channel][channel_size] tensor in place and
// optionally apply ReLU.  `bias` is read only when flag_bias is true.
template <typename Dtype>
static void fill_bias_relu(Dtype* tensor,
                           const Dtype* bias,
                           int channel,
                           int channel_size,
                           bool flag_bias,
                           bool flag_relu) {
  Dtype* data = tensor;
  for (int j = 0; j < channel; ++j) {
    Dtype bias_c = flag_bias ? bias[j] : 0;
    for (int i = 0; i < channel_size; i++) {
      data[i] += bias_c;
      if (flag_relu) {
        // Use a Dtype zero instead of the former `0.f` float literal so
        // the ternary does not promote integral/double Dtypes through float.
        data[i] = data[i] > 0 ? data[i] : static_cast<Dtype>(0);
      }
    }
    data += channel_size;
  }
}
// Apply ReLU in place: clamp every non-positive entry of `tensor` to zero.
template <typename Dtype>
static void do_relu(Dtype* tensor, int size) {
  for (int idx = 0; idx < size; ++idx) {
    if (!(tensor[idx] > (Dtype)0)) {
      tensor[idx] = (Dtype)0;
    }
  }
}
// Branch-free bounds check: true iff 0 <= a < b.  The unsigned cast folds
// both comparisons into one (a negative `a` wraps to a huge unsigned value).
inline bool is_a_ge_zero_and_a_lt_b(int a, int b) {
  const unsigned ua = static_cast<unsigned>(a);
  const unsigned ub = static_cast<unsigned>(b);
  return ua < ub;
}
// Scatter-add a column buffer (im2col layout) back into an image tensor.
// data_col holds, for every (channel, kernel_row, kernel_col) combination,
// one value per convolution output position; each value is accumulated into
// the input-image location it was originally sampled from.  data_im is
// zeroed first, so overlapping kernel windows sum up.  Asymmetric padding
// is supported via pad_h0/pad_h1 (top/bottom) and pad_w0/pad_w1
// (left/right).
template <typename Dtype>
static void col2im(const Dtype* data_col,
                   const int channels,
                   const int height,
                   const int width,
                   const int kernel_h,
                   const int kernel_w,
                   const int pad_h0,
                   const int pad_h1,
                   const int pad_w0,
                   const int pad_w1,
                   const int stride_h,
                   const int stride_w,
                   const int dilation_h,
                   const int dilation_w,
                   Dtype* data_im) {
  memset(data_im, 0, height * width * channels * sizeof(Dtype));
  // Number of convolution output positions in each dimension.
  const int output_h =
      (height + pad_h0 + pad_h1 - (dilation_h * (kernel_h - 1) + 1)) /
          stride_h +
      1;
  const int output_w =
      (width + pad_w0 + pad_w1 - (dilation_w * (kernel_w - 1) + 1)) / stride_w +
      1;
  const int channel_size = height * width;
  // data_im advances one channel plane per outer iteration; data_col is
  // consumed strictly sequentially across the whole loop nest.
  for (int channel = channels; channel--; data_im += channel_size) {
    for (int kernel_row = 0; kernel_row < kernel_h; kernel_row++) {
      for (int kernel_col = 0; kernel_col < kernel_w; kernel_col++) {
        // Image row touched by this kernel tap at the first output row.
        int input_row = -pad_h0 + kernel_row * dilation_h;
        for (int output_rows = output_h; output_rows; output_rows--) {
          if (!is_a_ge_zero_and_a_lt_b(input_row, height)) {
            // Entire output row maps into padding: skip its column data.
            data_col += output_w;
          } else {
            int input_col = -pad_w0 + kernel_col * dilation_w;
            for (int output_col = output_w; output_col; output_col--) {
              // Accumulate only when the column lands inside the image.
              if (is_a_ge_zero_and_a_lt_b(input_col, width)) {
                data_im[input_row * width + input_col] += *data_col;
              }
              data_col++;
              input_col += stride_w;
            }
          }
          input_row += stride_h;
        }
      }
    }
  }
}
//! for float, Dtype1 and Dtype2 are float
//! for int8, Dtype1 is char and Dtype2 is int
// Reference transposed convolution (deconvolution): implemented as a
// grouped GEMM of the transposed weights against the input, followed by
// col2im to scatter the column buffer into the output (skipped for the
// 1x1/stride-1/no-pad/no-dilation fast path, where the GEMM result is the
// output), then an optional bias + ReLU pass.
//! for float, Dtype1 and Dtype2 are float
//! for int8, Dtype1 is char and Dtype2 is int
template <typename Dtype1, typename Dtype2>
void deconv_basic(const Dtype1* din,
                  Dtype2* dout,
                  int num,
                  int chout,
                  int hout,
                  int wout,
                  int chin,
                  int hin,
                  int win,
                  const Dtype1* weights,
                  const Dtype2* bias,
                  int group,
                  int kernel_w,
                  int kernel_h,
                  int stride_w,
                  int stride_h,
                  int dila_w,
                  int dila_h,
                  int pad_w0,
                  int pad_w1,
                  int pad_h0,
                  int pad_h1,
                  bool flag_bias,
                  bool flag_relu) {
  int m = chout * kernel_w * kernel_h / group;
  int n = hin * win;
  int k = chin / group;
  int group_size_in = win * hin * chin / group;
  int group_size_coldata = m * n;
  int group_size_weights = chin * chout * kernel_w * kernel_h / (group * group);
  bool flag_1x1s1p1 = (kernel_w == 1) && (kernel_h == 1) && (stride_h == 1) &&
                      (stride_w == 1) && (pad_w0 == 0) && (pad_h0 == 0) &&
                      (pad_w1 == 0) && (pad_h1 == 0) && (dila_w == 1) &&
                      (dila_h == 1);
  // Bug fix: the workspace was sized with sizeof(float) regardless of
  // Dtype2, overflowing the buffer whenever sizeof(Dtype2) > sizeof(float)
  // (e.g. double).
  Dtype2* workspace_ptr =
      static_cast<Dtype2*>(malloc(sizeof(Dtype2) * m * n * group));
  for (int i = 0; i < num; ++i) {
    const Dtype1* din_batch = din + i * chin * hin * win;
    Dtype2* dout_batch = dout + i * chout * hout * wout;
    Dtype2* col_data = workspace_ptr;
    if (flag_1x1s1p1) {
      // 1x1/s1/p0/d1: the column buffer is already the output layout.
      col_data = dout_batch;
    }
    memset(col_data, 0, sizeof(Dtype2) * group_size_coldata * group);
    for (int g = 0; g < group; ++g) {
      const Dtype1* din_group = din_batch + g * group_size_in;
      const Dtype1* weights_group = weights + g * group_size_weights;
      Dtype2* coldata_group = col_data + g * group_size_coldata;
      // ReLU is fused into the GEMM only when no bias follows; otherwise
      // it must wait until after the bias is added below.
      basic_gemm<Dtype1, Dtype2>(true,
                                 false,
                                 m,
                                 n,
                                 k,
                                 1,
                                 weights_group,
                                 m,
                                 din_group,
                                 n,
                                 0,
                                 coldata_group,
                                 n,
                                 nullptr,
                                 false,
                                 (!flag_bias && flag_relu));
    }
    if (!flag_1x1s1p1) {
      col2im(col_data,
             chout,
             hout,
             wout,
             kernel_h,
             kernel_w,
             pad_h0,
             pad_h1,
             pad_w0,
             pad_w1,
             stride_h,
             stride_w,
             dila_h,
             dila_w,
             dout_batch);
    }
    //! add bias
    if (flag_bias) {
      fill_bias_relu(
          dout_batch, bias, chout, wout * hout, flag_bias, flag_relu);
    }
  }
  free(workspace_ptr);
}
|
GB_binop__le_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__le_fp64)
// A.*B function (eWiseMult): GB (_AemultB_01__le_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__le_fp64)
// A.*B function (eWiseMult): GB (_AemultB_03__le_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__le_fp64)
// A*D function (colscale): GB (_AxD__le_fp64)
// D*A function (rowscale): GB (_DxB__le_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__le_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__le_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__le_fp64)
// C=scalar+B GB (_bind1st__le_fp64)
// C=scalar+B' GB (_bind1st_tran__le_fp64)
// C=A+scalar GB (_bind2nd__le_fp64)
// C=A'+scalar GB (_bind2nd_tran__le_fp64)
// C type: bool
// A type: double
// B,b type: double
// BinaryOp: cij = (aij <= bij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
double aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
double bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x <= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LE || GxB_NO_FP64 || GxB_NO_LE_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense: Cx [p] = (Ax [p] <= Bx [p]).
// Returns GrB_NO_VALUE when this operator/type is disabled at compile time,
// letting the caller fall back to the generic kernel.
GrB_Info GB (_Cdense_ewise3_noaccum__le_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B, accumulating a sparse B into a dense C. The template body is
// compiled out (#if 0) for this operator (LE's bool output cannot accumulate
// in place over double), so when enabled this is a stub reporting success.
GrB_Info GB (_Cdense_accumB__le_fp64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b, accumulating a scalar into a dense C. As with _Cdense_accumB, the
// template body is compiled out (#if 0) for this operator; when enabled the
// function is a stub that reports success.
GrB_Info GB (_Cdense_accumb__le_fp64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, column scale: each entry cij = (aij <= d_jj) where D is diagonal.
// Cx is aliased directly to C->x; the template iterates A's pattern.
GrB_Info GB (_AxD__le_fp64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, row scale: each entry cij = (d_ii <= bij) where D is diagonal.
GrB_Info GB (_DxB__le_fp64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd C = A+B (or masked variants): operates on the set union of the
// patterns of A and B, applying cij = (aij <= bij) where both are present.
// The three werk arrays are scratch slicings freed by GB_FREE_WORK.
GrB_Info GB (_AaddB__le_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult C = A.*B (or masked variants): operates on the intersection of
// the patterns of A and B, applying cij = (aij <= bij).
GrB_Info GB (_AemultB_01__le_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// GB_BINOP_FLIP is 0 for this operator (flipped LE is handled elsewhere as
// GE), so only the unflipped branch below is compiled in.
GrB_Info GB (_AemultB_02__le_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<M> = A.*B when M is sparse/hyper and A, B are bitmap/full:
// iterates M's pattern, applying cij = (aij <= bij) where the mask permits.
GrB_Info GB (_AemultB_03__le_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult variants (C=A.*B, C<M>=A.*B, C<!M>=A.*B) when C is bitmap.
GrB_Info GB (_AemultB_bitmap__le_fp64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): bind a scalar as the first operand, Cx [p] = (x <= Bx [p]).
// Entries absent from the bitmap Bb (when B is bitmap) are skipped via GBB.
GrB_Info GB (_bind1st__le_fp64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
double x = (*((double *) x_input)) ;
double *Bx = (double *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
double bij = GBX (Bx, p, false) ;
Cx [p] = (x <= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): bind a scalar as the second operand, Cx [p] = (Ax [p] <= y).
// Entries absent from the bitmap Ab (when A is bitmap) are skipped via GBB.
GrB_Info GB (_bind2nd__le_fp64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
double *Ax = (double *) Ax_input ;
double y = (*((double *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
double aij = GBX (Ax, p, false) ;
Cx [p] = (aij <= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x <= aij) ; \
}
// C = op (x, A'): transpose A while applying cij = (x <= aij) via GB_CAST_OP.
GrB_Info GB (_bind1st_tran__le_fp64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows this function
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij <= y) ; \
}
// C = op (A', y): transpose A while applying cij = (aij <= y) via GB_CAST_OP.
GrB_Info GB (_bind2nd_tran__le_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
uvbke.h | #ifndef UVBKE_H
#define UVBKE_H
// Compute B-grid wind components ub/vb from C-grid winds uc/vc, scaled by
// dt5 and the metric terms cosa/rsina. Baseline variant: one complete k/i/j
// loop nest per output field (no fusion) -- the loop structure is the point
// of comparison with the *_fusion variants below.
// NOTE(review): reads uc(i, j - 1, k) at j == 0 and vc(i - 1, j, k) at
// i == 0 -- assumes Storage3D provides halo cells at index -1; confirm.
void uvbke(Storage3D& ub, Storage3D& vb, const Storage3D& uc, const Storage3D& vc, const Storage3D& cosa,
const Storage3D& rsina) {
for (int64_t k = 0; k < domain_height; ++k) {
for (int64_t i = 0; i < domain_size; ++i) {
for (int64_t j = 0; j < domain_size; ++j) {
ub(i, j, k) = ((dt5 * ((uc(i, j - 1, k) + uc(i, j, k)) - ((vc(i - 1, j, k) + vc(i, j, k)) * cosa(i, j, k)))) *
rsina(i, j, k));
}
}
}
for (int64_t k = 0; k < domain_height; ++k) {
for (int64_t i = 0; i < domain_size; ++i) {
for (int64_t j = 0; j < domain_size; ++j) {
vb(i, j, k) = ((dt5 * ((vc(i - 1, j, k) + vc(i, j, k)) - ((uc(i, j - 1, k) + uc(i, j, k)) * cosa(i, j, k)))) *
rsina(i, j, k));
}
}
}
}
// Same computation as uvbke, fully fused: both ub and vb are written in a
// single k/i/j loop nest, reusing the shared uc/vc/cosa/rsina loads.
void uvbke_fullfusion(Storage3D& ub, Storage3D& vb, const Storage3D& uc, const Storage3D& vc, const Storage3D& cosa,
const Storage3D& rsina) {
for (int64_t k = 0; k < domain_height; ++k) {
for (int64_t i = 0; i < domain_size; ++i) {
for (int64_t j = 0; j < domain_size; ++j) {
ub(i, j, k) = ((dt5 * ((uc(i, j - 1, k) + uc(i, j, k)) - ((vc(i - 1, j, k) + vc(i, j, k)) * cosa(i, j, k)))) *
rsina(i, j, k));
vb(i, j, k) = ((dt5 * ((vc(i - 1, j, k) + vc(i, j, k)) - ((uc(i, j - 1, k) + uc(i, j, k)) * cosa(i, j, k)))) *
rsina(i, j, k));
}
}
}
}
// "Partial fusion" variant. NOTE(review): as written this is identical to
// the unfused baseline uvbke (two separate loop nests) -- presumably kept
// for benchmarking symmetry, or the intended partial fusion was never
// applied; confirm against the benchmark harness.
void uvbke_partialfusion(Storage3D& ub, Storage3D& vb, const Storage3D& uc, const Storage3D& vc, const Storage3D& cosa,
const Storage3D& rsina) {
for (int64_t k = 0; k < domain_height; ++k) {
for (int64_t i = 0; i < domain_size; ++i) {
for (int64_t j = 0; j < domain_size; ++j) {
ub(i, j, k) = ((dt5 * ((uc(i, j - 1, k) + uc(i, j, k)) - ((vc(i - 1, j, k) + vc(i, j, k)) * cosa(i, j, k)))) *
rsina(i, j, k));
}
}
}
for (int64_t k = 0; k < domain_height; ++k) {
for (int64_t i = 0; i < domain_size; ++i) {
for (int64_t j = 0; j < domain_size; ++j) {
vb(i, j, k) = ((dt5 * ((vc(i - 1, j, k) + vc(i, j, k)) - ((uc(i, j - 1, k) + uc(i, j, k)) * cosa(i, j, k)))) *
rsina(i, j, k));
}
}
}
}
// Fully fused variant parallelized with OpenMP over the outer k loop.
// Iterations are independent (each (i, j, k) output depends only on inputs),
// so no synchronization is needed.
void uvbke_openmp(Storage3D& ub, Storage3D& vb, const Storage3D& uc, const Storage3D& vc, const Storage3D& cosa,
const Storage3D& rsina) {
#pragma omp parallel for
for (int64_t k = 0; k < domain_height; ++k) {
for (int64_t i = 0; i < domain_size; ++i) {
for (int64_t j = 0; j < domain_size; ++j) {
ub(i, j, k) = ((dt5 * ((uc(i, j - 1, k) + uc(i, j, k)) - ((vc(i - 1, j, k) + vc(i, j, k)) * cosa(i, j, k)))) *
rsina(i, j, k));
vb(i, j, k) = ((dt5 * ((vc(i - 1, j, k) + vc(i, j, k)) - ((uc(i, j - 1, k) + uc(i, j, k)) * cosa(i, j, k)))) *
rsina(i, j, k));
}
}
}
}
#endif // UVBKE_H
|
conv_dw_dilation_kernel_arm.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: haoluo@openailab.com
*/
#ifndef __CONV_DW_DILATION_KERNEL_ARM_H_
#define __CONV_DW_DILATION_KERNEL_ARM_H_
#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "convolution_param.h"
#include "conv_dw_k5_k7_kernel_arm.h"
/* 3x3 depthwise convolution where the dilation rate equals `pad` (stride 1),
 * so each kernel tap is offset by 0 or +/- pad in both h and w and the output
 * has the same spatial size as the input. The image is processed in three
 * horizontal bands (top / middle / bottom) and each row in three column spans
 * (left edge / interior / right edge); out-of-bounds taps are simply dropped
 * (zero padding). The interior span is NEON-vectorized four pixels at a time.
 *
 * Weight layout per channel: 9 floats, row-major 3x3 (index = row * 3 + col).
 * Returns 0 on success.
 */
int conv_dw_dilation_run(float* input_buf, float* weight_buf, float* bias, float* output_buf, int input_h, int input_w,
                         int channel, int pad, int activation, int num_thread)
{
    int channel_size = input_h * input_w;
    int mid_w = input_w - pad * 2;          /* columns with all 3 horizontal taps in bounds */
    int mid_block_end = (mid_w & -4) + pad; /* end of the 4-wide vectorized span */
    int mid_end = mid_w + pad;              /* end of the fully-interior span */
#pragma omp parallel for num_threads(num_thread)
    for (int c = 0; c < channel; c++)
    {
        /* Fix: `w` was declared at function scope, which makes it SHARED
         * across OpenMP threads and races between channels; it must be
         * private, so declare it inside the parallel loop body. */
        int w = 0;
        float* input_buf_c = input_buf + c * channel_size;
        float* output_buf_c = output_buf + c * channel_size;
        float* weight_buf_c = weight_buf + c * 9;
        float bias_c = bias ? bias[c] : 0;
        /* Top band: the kernel row above (taps 0..2) falls outside the image. */
        for (int h = 0; h < pad; h++)
        {
            /* Left edge: taps left of the pixel (cols w - pad) also dropped. */
            for (w = 0; w < pad; w++)
            {
                float tmp = bias_c;
                tmp += weight_buf_c[4] * input_buf_c[h * input_w + w];
                tmp += weight_buf_c[5] * input_buf_c[h * input_w + w + pad];
                tmp += weight_buf_c[7] * input_buf_c[(h + pad) * input_w + w];
                tmp += weight_buf_c[8] * input_buf_c[(h + pad) * input_w + w + pad];
                output_buf_c[h * input_w + w] = elem_activation(tmp, activation);
            }
            /* Interior, vectorized: taps 3..8 (center and bottom rows). */
            for (; w < mid_block_end; w += 4)
            {
                float32x4_t tmp_4 = vdupq_n_f32(bias_c);
                tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[3]), vld1q_f32(input_buf_c + h * input_w + w - pad));
                tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[4]), vld1q_f32(input_buf_c + h * input_w + w));
                tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[5]), vld1q_f32(input_buf_c + h * input_w + w + pad));
                tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[6]),
                                  vld1q_f32(input_buf_c + (h + pad) * input_w + w - pad));
                tmp_4 =
                    vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[7]), vld1q_f32(input_buf_c + (h + pad) * input_w + w));
                tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[8]),
                                  vld1q_f32(input_buf_c + (h + pad) * input_w + w + pad));
                tmp_4 = vector_activation(tmp_4, activation);
                vst1q_f32(output_buf_c + h * input_w + w, tmp_4);
            }
            /* Interior, scalar remainder. */
            for (; w < mid_end; w++)
            {
                float tmp = bias_c;
                tmp += weight_buf_c[3] * input_buf_c[h * input_w + w - pad];
                tmp += weight_buf_c[4] * input_buf_c[h * input_w + w];
                tmp += weight_buf_c[5] * input_buf_c[h * input_w + w + pad];
                tmp += weight_buf_c[6] * input_buf_c[(h + pad) * input_w + w - pad];
                tmp += weight_buf_c[7] * input_buf_c[(h + pad) * input_w + w];
                tmp += weight_buf_c[8] * input_buf_c[(h + pad) * input_w + w + pad];
                output_buf_c[h * input_w + w] = elem_activation(tmp, activation);
            }
            /* Right edge: taps right of the pixel (cols w + pad) dropped. */
            for (; w < input_w; w++)
            {
                float tmp = bias_c;
                tmp += weight_buf_c[3] * input_buf_c[h * input_w + w - pad];
                tmp += weight_buf_c[4] * input_buf_c[h * input_w + w];
                tmp += weight_buf_c[6] * input_buf_c[(h + pad) * input_w + w - pad];
                tmp += weight_buf_c[7] * input_buf_c[(h + pad) * input_w + w];
                output_buf_c[h * input_w + w] = elem_activation(tmp, activation);
            }
        }
        /* Middle band: all three kernel rows are in bounds. */
        for (int h = pad; h < input_h - pad; h++)
        {
            for (w = 0; w < pad; w++)
            {
                float tmp = bias_c;
                tmp += weight_buf_c[1] * input_buf_c[(h - pad) * input_w + w];
                tmp += weight_buf_c[2] * input_buf_c[(h - pad) * input_w + w + pad];
                tmp += weight_buf_c[4] * input_buf_c[h * input_w + w];
                tmp += weight_buf_c[5] * input_buf_c[h * input_w + w + pad];
                tmp += weight_buf_c[7] * input_buf_c[(h + pad) * input_w + w];
                tmp += weight_buf_c[8] * input_buf_c[(h + pad) * input_w + w + pad];
                output_buf_c[h * input_w + w] = elem_activation(tmp, activation);
            }
            /* Interior, vectorized: all 9 taps. */
            for (; w < mid_block_end; w += 4)
            {
                float32x4_t tmp_4 = vdupq_n_f32(bias_c);
                tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[0]),
                                  vld1q_f32(input_buf_c + (h - pad) * input_w + w - pad));
                tmp_4 =
                    vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[1]), vld1q_f32(input_buf_c + (h - pad) * input_w + w));
                tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[2]),
                                  vld1q_f32(input_buf_c + (h - pad) * input_w + w + pad));
                tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[3]), vld1q_f32(input_buf_c + h * input_w + w - pad));
                tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[4]), vld1q_f32(input_buf_c + h * input_w + w));
                tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[5]), vld1q_f32(input_buf_c + h * input_w + w + pad));
                tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[6]),
                                  vld1q_f32(input_buf_c + (h + pad) * input_w + w - pad));
                tmp_4 =
                    vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[7]), vld1q_f32(input_buf_c + (h + pad) * input_w + w));
                tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[8]),
                                  vld1q_f32(input_buf_c + (h + pad) * input_w + w + pad));
                tmp_4 = vector_activation(tmp_4, activation);
                vst1q_f32(output_buf_c + h * input_w + w, tmp_4);
            }
            for (; w < mid_end; w++)
            {
                float tmp = bias_c;
                tmp += weight_buf_c[0] * input_buf_c[(h - pad) * input_w + w - pad];
                tmp += weight_buf_c[1] * input_buf_c[(h - pad) * input_w + w];
                tmp += weight_buf_c[2] * input_buf_c[(h - pad) * input_w + w + pad];
                tmp += weight_buf_c[3] * input_buf_c[h * input_w + w - pad];
                tmp += weight_buf_c[4] * input_buf_c[h * input_w + w];
                tmp += weight_buf_c[5] * input_buf_c[h * input_w + w + pad];
                tmp += weight_buf_c[6] * input_buf_c[(h + pad) * input_w + w - pad];
                tmp += weight_buf_c[7] * input_buf_c[(h + pad) * input_w + w];
                tmp += weight_buf_c[8] * input_buf_c[(h + pad) * input_w + w + pad];
                output_buf_c[h * input_w + w] = elem_activation(tmp, activation);
            }
            for (; w < input_w; w++)
            {
                float tmp = bias_c;
                tmp += weight_buf_c[0] * input_buf_c[(h - pad) * input_w + w - pad];
                tmp += weight_buf_c[1] * input_buf_c[(h - pad) * input_w + w];
                tmp += weight_buf_c[3] * input_buf_c[h * input_w + w - pad];
                tmp += weight_buf_c[4] * input_buf_c[h * input_w + w];
                tmp += weight_buf_c[6] * input_buf_c[(h + pad) * input_w + w - pad];
                tmp += weight_buf_c[7] * input_buf_c[(h + pad) * input_w + w];
                output_buf_c[h * input_w + w] = elem_activation(tmp, activation);
            }
        }
        /* Bottom band: the kernel row below (taps 6..8) falls outside the image. */
        for (int h = input_h - pad; h < input_h; h++)
        {
            for (w = 0; w < pad; w++)
            {
                float tmp = bias_c;
                tmp += weight_buf_c[1] * input_buf_c[(h - pad) * input_w + w];
                tmp += weight_buf_c[2] * input_buf_c[(h - pad) * input_w + w + pad];
                tmp += weight_buf_c[4] * input_buf_c[h * input_w + w];
                tmp += weight_buf_c[5] * input_buf_c[h * input_w + w + pad];
                output_buf_c[h * input_w + w] = elem_activation(tmp, activation);
            }
            /* Interior, vectorized: taps 0..5 (top and center rows). */
            for (; w < mid_block_end; w += 4)
            {
                float32x4_t tmp_4 = vdupq_n_f32(bias_c);
                tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[0]),
                                  vld1q_f32(input_buf_c + (h - pad) * input_w + w - pad));
                tmp_4 =
                    vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[1]), vld1q_f32(input_buf_c + (h - pad) * input_w + w));
                tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[2]),
                                  vld1q_f32(input_buf_c + (h - pad) * input_w + w + pad));
                tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[3]), vld1q_f32(input_buf_c + h * input_w + w - pad));
                tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[4]), vld1q_f32(input_buf_c + h * input_w + w));
                tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[5]), vld1q_f32(input_buf_c + h * input_w + w + pad));
                tmp_4 = vector_activation(tmp_4, activation);
                vst1q_f32(output_buf_c + h * input_w + w, tmp_4);
            }
            for (; w < mid_end; w++)
            {
                float tmp = bias_c;
                tmp += weight_buf_c[0] * input_buf_c[(h - pad) * input_w + w - pad];
                tmp += weight_buf_c[1] * input_buf_c[(h - pad) * input_w + w];
                tmp += weight_buf_c[2] * input_buf_c[(h - pad) * input_w + w + pad];
                tmp += weight_buf_c[3] * input_buf_c[h * input_w + w - pad];
                tmp += weight_buf_c[4] * input_buf_c[h * input_w + w];
                tmp += weight_buf_c[5] * input_buf_c[h * input_w + w + pad];
                output_buf_c[h * input_w + w] = elem_activation(tmp, activation);
            }
            for (; w < input_w; w++)
            {
                float tmp = bias_c;
                tmp += weight_buf_c[0] * input_buf_c[(h - pad) * input_w + w - pad];
                tmp += weight_buf_c[1] * input_buf_c[(h - pad) * input_w + w];
                tmp += weight_buf_c[3] * input_buf_c[h * input_w + w - pad];
                tmp += weight_buf_c[4] * input_buf_c[h * input_w + w];
                output_buf_c[h * input_w + w] = elem_activation(tmp, activation);
            }
        }
    }
    return 0;
}
#endif
|
bare_concurrent_map.h | #pragma once
#include <omp.h>
#include <functional>
#include <numeric>
#include "bare_map.h"
namespace hpmr {
// A concurrent map that requires providing hash values when use.
template <class K, class V, class H = std::hash<K>>
class BareConcurrentMap {
public:
BareConcurrentMap();
// Copy constructor: copies segments/settings but creates fresh locks.
BareConcurrentMap(const BareConcurrentMap& m);
~BareConcurrentMap();
// Pre-size segments (and, at a smaller scale, thread caches) for n_keys_min.
void reserve(const size_t n_keys_min);
void set_max_load_factor(const float max_load_factor);
float get_max_load_factor() const { return max_load_factor; };
size_t get_n_keys() const;
size_t get_n_buckets() const;
float get_load_factor();
// Blocking insert/update; `reducer` combines an existing value with `value`.
void set(
const K& key,
const size_t hash_value,
const V& value,
const std::function<void(V&, const V&)>& reducer = Reducer<V>::overwrite);
// Non-blocking insert: on lock contention the update is buffered in a
// per-thread cache and must later be flushed with sync().
void async_set(
const K& key,
const size_t hash_value,
const V& value,
const std::function<void(V&, const V&)>& reducer = Reducer<V>::overwrite);
// Flush all thread caches into the shared segments.
void sync(const std::function<void(V&, const V&)>& reducer = Reducer<V>::overwrite);
void unset(const K& key, const size_t hash_value);
V get(const K& key, const size_t hash_value, const V& default_value = V());
bool has(const K& key, const size_t hash_value);
void clear();
void clear_and_shrink();
// Serialize/deserialize all segments (hps-based), segment by segment.
std::string to_string();
void from_string(const std::string& str);
void for_each(
const std::function<void(const K& key, const size_t hash_value, const V& value)>& handler,
const bool verbose = false);
private:
float max_load_factor;
// Data is sharded into n_segments = n_threads * N_SEGMENTS_PER_THREAD
// independently-locked BareMaps; segment = hash_value % n_segments.
size_t n_segments;
std::vector<BareMap<K, V, H>> segments;
size_t n_threads;
// One lock-free overflow buffer per thread, used by async_set.
std::vector<BareMap<K, V, H>> thread_caches;
std::vector<omp_lock_t> segment_locks;
constexpr static size_t N_SEGMENTS_PER_THREAD = 8;
bool has_big_prime_factors(const int num);
};
template <class K, class V, class H>
BareConcurrentMap<K, V, H>::BareConcurrentMap() {
max_load_factor = BareMap<K, V, H>::DEFAULT_MAX_LOAD_FACTOR;
n_threads = omp_get_max_threads();
thread_caches.resize(n_threads);
// NOTE(review): this throws when has_big_prime_factors(n_threads) returns
// FALSE, yet the message claims the thread count HAS big prime factors.
// One of the two appears inverted -- verify against the contract of
// has_big_prime_factors (defined at the bottom of this file).
if (!has_big_prime_factors(n_threads)) {
throw std::invalid_argument("N threads has big prime factors, which affects performance.");
}
n_segments = n_threads * N_SEGMENTS_PER_THREAD;
segments.resize(n_segments);
segment_locks.resize(n_segments);
// Each segment gets its own OpenMP lock (destroyed in the destructor).
for (auto& lock : segment_locks) omp_init_lock(&lock);
}
template <class K, class V, class H>
BareConcurrentMap<K, V, H>::BareConcurrentMap(const BareConcurrentMap& m) {
// Copies the segment data and settings; locks are freshly initialized (not
// copied) and thread caches start empty. Unlike the default constructor,
// no thread-count validation is performed here.
// NOTE(review): the source map `m` is read without taking its locks --
// assumes no concurrent writers during copy; confirm with callers.
max_load_factor = m.max_load_factor;
n_threads = omp_get_max_threads();
thread_caches.resize(n_threads);
n_segments = m.n_segments;
segments = m.segments;
segment_locks.resize(n_segments);
for (auto& lock : segment_locks) omp_init_lock(&lock);
}
template <class K, class V, class H>
BareConcurrentMap<K, V, H>::~BareConcurrentMap() {
// Release the OpenMP locks created in the constructors.
for (auto& lock : segment_locks) omp_destroy_lock(&lock);
}
template <class K, class V, class H>
void BareConcurrentMap<K, V, H>::reserve(const size_t n_keys_min) {
  // Spread the requested capacity evenly over the segments.
  const size_t keys_per_segment = n_keys_min / n_segments;
  for (auto& segment : segments) segment.reserve(keys_per_segment);
  // Thread caches only absorb overflow from contended segments, so a small
  // fraction of the total is a reasonable estimate.
  const size_t keys_per_cache = n_keys_min / 1000;
  for (auto& cache : thread_caches) cache.reserve(keys_per_cache);
}
template <class K, class V, class H>
void BareConcurrentMap<K, V, H>::set_max_load_factor(const float max_load_factor) {
  // Record the new factor and propagate it to every segment and every
  // per-thread cache.
  this->max_load_factor = max_load_factor;
  for (auto& segment : segments) segment.max_load_factor = max_load_factor;
  for (auto& cache : thread_caches) cache.max_load_factor = max_load_factor;
}
template <class K, class V, class H>
size_t BareConcurrentMap<K, V, H>::get_n_keys() const {
  // Sum of the key counts over all segments. No locks are taken, so with
  // concurrent writers this is only a snapshot.
  size_t total = 0;
  for (const auto& segment : segments) total += segment.get_n_keys();
  return total;
}
template <class K, class V, class H>
size_t BareConcurrentMap<K, V, H>::get_n_buckets() const {
  // Sum of the bucket counts over all segments (no locking; snapshot only).
  size_t total = 0;
  for (const auto& segment : segments) total += segment.get_n_buckets();
  return total;
}
template <class K, class V, class H>
void BareConcurrentMap<K, V, H>::async_set(
    const K& key,
    const size_t hash_value,
    const V& value,
    const std::function<void(V&, const V&)>& reducer) {
  // Non-blocking insert: try the owning segment's lock exactly once. On
  // contention, buffer the update in this thread's private cache instead of
  // waiting; sync() later flushes the caches into the segments.
  const size_t target = hash_value % n_segments;
  omp_lock_t& target_lock = segment_locks[target];
  if (!omp_test_lock(&target_lock)) {
    thread_caches.at(omp_get_thread_num()).set(key, hash_value, value, reducer);
    return;
  }
  segments.at(target).set(key, hash_value, value, reducer);
  omp_unset_lock(&target_lock);
}
template <class K, class V, class H>
void BareConcurrentMap<K, V, H>::sync(const std::function<void(V&, const V&)>& reducer) {
// Flush every thread's private cache into the shared segments, then empty
// the caches. Each thread drains its own cache inside a parallel region;
// segment locks are taken (blocking) per entry, so entries buffered by
// async_set are merged with the same reducer semantics as a direct set.
#pragma omp parallel
{
const int thread_id = omp_get_thread_num();
const auto& handler = [&](const K& key, const size_t hash_value, const V& value) {
const size_t segment_id = hash_value % n_segments;
auto& lock = segment_locks[segment_id];
omp_set_lock(&lock);
segments.at(segment_id).set(key, hash_value, value, reducer);
omp_unset_lock(&lock);
};
thread_caches.at(thread_id).for_each(handler);
thread_caches.at(thread_id).clear();
}
}
template <class K, class V, class H>
void BareConcurrentMap<K, V, H>::set(
    const K& key,
    const size_t hash_value,
    const V& value,
    const std::function<void(V&, const V&)>& reducer) {
  // Blocking insert/update: wait for the owning segment's lock, then
  // delegate to the segment's set (reducer merges with any existing value).
  const size_t target = hash_value % n_segments;
  omp_lock_t& target_lock = segment_locks[target];
  omp_set_lock(&target_lock);
  segments.at(target).set(key, hash_value, value, reducer);
  omp_unset_lock(&target_lock);
}
template <class K, class V, class H>
V BareConcurrentMap<K, V, H>::get(const K& key, const size_t hash_value, const V& default_value) {
  // Locked lookup: returns a copy of the stored value, or default_value
  // when the key is absent.
  const size_t target = hash_value % n_segments;
  omp_lock_t& target_lock = segment_locks[target];
  omp_set_lock(&target_lock);
  const V result = segments.at(target).get(key, hash_value, default_value);
  omp_unset_lock(&target_lock);
  return result;
}
template <class K, class V, class H>
void BareConcurrentMap<K, V, H>::unset(const K& key, const size_t hash_value) {
  // Locked removal of a key from its owning segment.
  const size_t target = hash_value % n_segments;
  omp_lock_t& target_lock = segment_locks[target];
  omp_set_lock(&target_lock);
  segments.at(target).unset(key, hash_value);
  omp_unset_lock(&target_lock);
}
template <class K, class V, class H>
bool BareConcurrentMap<K, V, H>::has(const K& key, const size_t hash_value) {
  // Locked membership test against the owning segment.
  const size_t target = hash_value % n_segments;
  omp_lock_t& target_lock = segment_locks[target];
  omp_set_lock(&target_lock);
  const bool found = segments.at(target).has(key, hash_value);
  omp_unset_lock(&target_lock);
  return found;
}
template <class K, class V, class H>
void BareConcurrentMap<K, V, H>::clear() {
  // Empty every segment and every thread cache. No locks are taken, so
  // callers must ensure there are no concurrent writers.
  for (auto& segment : segments) segment.clear();
  for (auto& cache : thread_caches) cache.clear();
}
template <class K, class V, class H>
void BareConcurrentMap<K, V, H>::clear_and_shrink() {
  // Like clear(), but also releases the storage of each segment and cache.
  // No locks are taken; callers must ensure no concurrent access.
  for (auto& segment : segments) segment.clear_and_shrink();
  for (auto& cache : thread_caches) cache.clear_and_shrink();
}
template <class K, class V, class H>
std::string BareConcurrentMap<K, V, H>::to_string() {
// Serialize the whole map to one string with the layout
// [max_load_factor][segment 0 blob]...[segment n-1 blob].
// Each segment is serialized to its own string in parallel (under its
// segment lock); the blobs are then concatenated sequentially so the
// byte layout matches exactly what from_string() parses.
std::vector<std::string> ostrs(n_segments);
size_t total_size = 0;
#pragma omp parallel for
for (size_t i = 0; i < n_segments; i++) {
auto& lock = segment_locks[i];
omp_set_lock(&lock);
hps::serialize_to_string(segments.at(i), ostrs.at(i));
omp_unset_lock(&lock);
// Accumulate the total payload so the output can be reserved once.
#pragma omp atomic
total_size += ostrs[i].size();
}
std::string str;
// + n_segments * 8: presumably headroom for hps per-blob length
// prefixes -- confirm against the hps serializer.
str.reserve(total_size + n_segments * 8);
hps::OutputBuffer<std::string> ob_str(str);
hps::Serializer<float, std::string>::serialize(max_load_factor, ob_str);
for (size_t i = 0; i < n_segments; i++) {
hps::Serializer<std::string, std::string>::serialize(ostrs[i], ob_str);
}
ob_str.flush();
return str;
}
template <class K, class V, class H>
void BareConcurrentMap<K, V, H>::from_string(const std::string& str) {
// Inverse of to_string(): read max_load_factor, then the n_segments
// per-segment blobs (sequentially, to preserve the stream order), and
// finally deserialize each blob into its segment in parallel under the
// segment lock. Assumes this map was built with the same n_segments as
// the serializing map.
std::vector<std::string> istrs(n_segments);
hps::InputBuffer<std::string> ib_str(str);
hps::Serializer<float, std::string>::parse(max_load_factor, ib_str);
for (size_t i = 0; i < n_segments; i++) {
hps::Serializer<std::string, std::string>::parse(istrs[i], ib_str);
}
#pragma omp parallel for
for (size_t i = 0; i < n_segments; i++) {
auto& lock = segment_locks[i];
omp_set_lock(&lock);
hps::parse_from_string(segments.at(i), istrs[i]);
omp_unset_lock(&lock);
}
}
template <class K, class V, class H>
void BareConcurrentMap<K, V, H>::for_each(
const std::function<void(const K& key, const size_t hash_value, const V& value)>& handler,
const bool verbose) {
// Apply handler to every entry of every segment; segments are dealt to
// threads round-robin (schedule(static, 1)).
// NOTE(review): no segment locks are taken here -- presumably callers
// must not run for_each concurrently with writers; confirm.
#pragma omp parallel for schedule(static, 1)
for (size_t i = 0; i < n_segments; i++) {
segments.at(i).for_each(handler);
// Progress report from thread 0 only: counts its finished segments
// out of N_SEGMENTS_PER_THREAD.
if (verbose && omp_get_thread_num() == 0) {
printf("%zu/%zu ", i / n_threads, N_SEGMENTS_PER_THREAD);
}
}
if (verbose) printf("#\n");
}
template <class K, class V, class H>
bool BareConcurrentMap<K, V, H>::has_big_prime_factors(const int num) {
  // Strip every factor of 2, 3, 5 and 7 from num; whatever residue
  // survives is a product of primes larger than 7.
  // NOTE(review): this returns true exactly when num has NO prime factor
  // above 7 (residue == 1), i.e. the name reads inverted relative to the
  // result -- confirm the intended polarity against the call sites before
  // renaming or negating.
  int residue = num;
  for (const int p : {2, 3, 5, 7}) {
    while (residue % p == 0) {
      residue /= p;
    }
  }
  return residue == 1;
}
} // namespace hpmr
|
floyd-warshall.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 1024. */
#include "floyd-warshall.h"
/* Array initialization. */
static
void init_array (int n,
DATA_TYPE POLYBENCH_2D(path,N,N,n,n))
{
/* Fill path with deterministic pseudo-distances (i+1)(j+1)/n.
The annotate attributes look like precision-tuning (TAFFO-style) range
hints declaring i and j in [0, N] -- confirm against the toolchain. */
int i __attribute__((annotate("scalar(range(0, " PB_XSTR(N) ")) final")));
int j __attribute__((annotate("scalar(range(0, " PB_XSTR(N) ")) final")));
for (i = 0; i < n; i++)
for (j = 0; j < n; j++)
path[i][j] = ((DATA_TYPE) (i+1)*(j+1)) / n;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int n,
DATA_TYPE POLYBENCH_2D(path,N,N,n,n))
{
/* Dump the whole matrix to stderr (one value per DATA_PRINTF_MODIFIER,
line break every 20 values) so live-out data is observable and cannot
be dead-code-eliminated. */
int i, j;
for (i = 0; i < n; i++)
for (j = 0; j < n; j++) {
fprintf (stderr, DATA_PRINTF_MODIFIER, path[i][j]);
if ((i * n + j) % 20 == 0) fprintf (stderr, "\n");
}
fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_floyd_warshall(int n,
DATA_TYPE POLYBENCH_2D(path,N,N,n,n))
{
int i, j, k;
#pragma scop
/* Floyd-Warshall all-pairs shortest paths: after sweep k, path[i][j] is
the best i->j distance using intermediates 0..k. The k loop is
inherently sequential; each sweep parallelizes over i. */
for (k = 0; k < _PB_N; k++)
{
#pragma omp parallel for shared (k) private (j)
for(i = 0; i < _PB_N; i++)
for (j = 0; j < _PB_N; j++)
{
/* path_old is never read afterwards; presumably it exists only to
carry a range annotation for the precision tuner -- confirm. */
DATA_TYPE __attribute__((annotate("scalar(range(0, " PB_XSTR(N) "))"))) path_old = path[i][j];
DATA_TYPE __attribute__((annotate("scalar(range(0, " PB_XSTR(2*N) "))"))) path_new = path[i][k] + path[k][j];
/* NOTE(review): this critical section serializes every update and
defeats the parallel for. With the non-negative weights produced
by init_array, row k and column k are invariant during sweep k, so
the update looks race-free without it -- confirm before removing. */
#pragma omp critical
path[i][j] = (path[i][j] < path_new)
? path[i][j]
: path_new;
}
}
#pragma endscop
}
int main(int argc, char** argv)
{
/* Polybench driver: allocate, initialize, time the kernel, then print
the live-out array (to stderr) to defeat dead-code elimination. */
/* Retrieve problem size. */
int n = N;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(path, DATA_TYPE __attribute__((annotate("target('path') scalar(range(0, " PB_XSTR(N) "))"))), N, N, n, n);
/* Initialize array(s). */
init_array (n, POLYBENCH_ARRAY(path));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_floyd_warshall (n, POLYBENCH_ARRAY(path));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(path)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(path);
return 0;
}
|
a.30.1.c | /* { dg-do compile } */
/* Exercises OpenMP lastprivate(i): after the worksharing loop, i outside
the parallel region must hold the value from the sequentially last
iteration (the loop exits with i == n-1). Compile-only testcase (see the
dg-do directive above). */
void
a30 (int n, float *a, float *b)
{
int i;
#pragma omp parallel
{
#pragma omp for lastprivate(i)
for (i = 0; i < n - 1; i++)
a[i] = b[i] + b[i + 1];
}
a[i] = b[i]; /* i == n-1 here */
}
|
isogeometric_posteriori_estimator.h | //
// Project Name: Kratos
// Last Modified by: $Author: hbui $
// Date: $Date: 28 May 2015 $
// Revision: $Revision: 1.0 $
//
//
#if !defined(KRATOS_ISOGEOMETRIC_POSTERIORI_ESTIMATOR_H_INCLUDED )
#define KRATOS_ISOGEOMETRIC_POSTERIORI_ESTIMATOR_H_INCLUDED
// System includes
#include <string>
#include <vector>
#include <iostream>
// External includes
#include <omp.h>
#include "boost/progress.hpp"
// Project includes
#include "includes/define.h"
#include "includes/model_part.h"
#include "utilities/math_utils.h"
#include "spaces/ublas_space.h"
#include "linear_solvers/linear_solver.h"
#include "isogeometric_application/custom_geometries/isogeometric_geometry.h"
namespace Kratos
{
///@addtogroup ApplicationNameApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
extern Variable<bool> HAS_STRAIN_AT_NODE;
extern Variable<bool> HAS_STRESSES_AT_NODE;
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/// Short class definition.
/*** Detail class definition.
*/
class IsogeometricPosterioriEstimator
{
public:
///@name Type Definitions
///@{
typedef typename ModelPart::NodesContainerType NodesArrayType;
typedef typename ModelPart::ElementsContainerType ElementsArrayType;
typedef typename ModelPart::ConditionsContainerType ConditionsArrayType;
typedef typename Element::GeometryType GeometryType;
typedef IsogeometricGeometry<GeometryType::PointType> IsogeometricGeometryType;
typedef typename GeometryType::CoordinatesArrayType CoordinatesArrayType;
typedef UblasSpace<double, CompressedMatrix, Vector> SerialSparseSpaceType;
typedef UblasSpace<double, Matrix, Vector> SerialDenseSpaceType;
typedef LinearSolver<SerialSparseSpaceType, SerialDenseSpaceType> LinearSolverType;
/// Pointer definition of IsogeometricPosterioriEstimator
KRATOS_CLASS_POINTER_DEFINITION(IsogeometricPosterioriEstimator);
///@}
///@name Life Cycle
///@{
/// Default constructor.
IsogeometricPosterioriEstimator()
{
}
/// Destructor.
virtual ~IsogeometricPosterioriEstimator()
{
}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/// Simple posteriori error estimator based on nodal stress & strain
/// Reference: Matlab Implementation of the Finite Element Method in Elasticity, Alberty et al
/// (declared here, defined in the matching .cpp)
double ComputeSimplePosterioriError(ModelPart& r_model_part);
/// Companion of the above: computes the error indicator for rThisVariable
/// on the nodes, using pSolver for the involved linear system
/// (declared here, defined in the matching .cpp).
void ComputeSimplePosterioriErrorOnNodes(const Variable<double>& rThisVariable,
ModelPart& r_model_part,
LinearSolverType::Pointer pSolver);
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
virtual std::string Info() const
{
std::stringstream buffer;
buffer << "A collection of posteriori estimators for isogeometric method";
return buffer.str();
}
/// Print information about this object.
virtual void PrintInfo(std::ostream& rOStream) const
{
rOStream << Info();
}
/// Print object's data.
virtual void PrintData(std::ostream& rOStream) const
{}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
/// Calculate vector variable (Stress, Strain) at a local point
/// by shape-function interpolation of the nodal values:
/// rResult = sum_i N_i(rCoordinates) * value_i. rResult is sized from the
/// first node's value and returned by reference.
Vector& CalculateOnPoint(const Variable<Vector>& rVariable,
Vector& rResult,
Element::Pointer& pElement,
const CoordinatesArrayType& rCoordinates)
{
Vector N;
pElement->GetGeometry().ShapeFunctionsValues(N, rCoordinates);
for(unsigned int i = 0; i < pElement->GetGeometry().size(); ++i)
{
Vector& NodalValues = pElement->GetGeometry()[i].GetSolutionStepValue(rVariable);
if(i == 0)
{
// The first node fixes the size of the result before accumulating.
if(rResult.size() != NodalValues.size())
rResult.resize(NodalValues.size());
noalias(rResult) = N( i ) * NodalValues;
}
else
{
noalias(rResult) += N( i ) * NodalValues;
}
}
return rResult;
}
//**********AUXILIARY FUNCTION**************************************************************
//******************************************************************************************
/// Append candidate to v only if it is not already present (linear scan).
inline void AddUnique(std::vector<std::size_t>& v, const std::size_t& candidate)
{
std::vector<std::size_t>::iterator i = v.begin();
std::vector<std::size_t>::iterator endit = v.end();
while ( i != endit && (*i) != candidate)
{
++i;
}
if( i == endit )
{
v.push_back(candidate);
}
}
//**********AUXILIARY FUNCTION**************************************************************
//******************************************************************************************
/// Build the sparsity pattern of A from the element connectivities: a
/// nonzero (i,j) is created wherever two nodes share an element, with node
/// ids mapped to row indices through MapNodeIdToVec. Rows are then filled
/// with zeros, sorted by column.
/// NOTE(review): MapNodeIdToVec is taken by value, so every call copies
/// the whole map -- a const reference looks sufficient; confirm.
void ConstructMatrixStructure (
SerialSparseSpaceType::MatrixType& A,
ElementsArrayType& rElements,
std::map<unsigned int, unsigned int> MapNodeIdToVec,
ProcessInfo& CurrentProcessInfo
)
{
std::size_t equation_size = A.size1();
std::vector<std::vector<std::size_t> > indices(equation_size);
Element::EquationIdVectorType ids;
for(typename ElementsArrayType::iterator i_element = rElements.begin() ; i_element != rElements.end() ; ++i_element)
{
// Map the element's node ids to equation (row) indices.
ids.resize((i_element)->GetGeometry().size());
for(unsigned int i = 0; i < (i_element)->GetGeometry().size(); ++i)
ids[i] = MapNodeIdToVec[(i_element)->GetGeometry()[i].Id()];
// Record the pairwise couplings of this element's rows.
for(std::size_t i = 0 ; i < ids.size() ; ++i)
{
if(ids[i] < equation_size)
{
std::vector<std::size_t>& row_indices = indices[ids[i]];
for(std::size_t j = 0 ; j < ids.size() ; ++j)
{
if(ids[j] < equation_size)
AddUnique(row_indices, ids[j]);
}
}
}
}
//allocating the memory needed
int data_size = 0;
for(std::size_t i = 0 ; i < indices.size() ; ++i)
{
data_size += indices[i].size();
}
A.reserve(data_size, false);
//filling with zero the matrix (creating the structure)
#ifndef _OPENMP
for(std::size_t i = 0 ; i < indices.size() ; i++)
{
std::vector<std::size_t>& row_indices = indices[i];
std::sort(row_indices.begin(), row_indices.end());
for(std::vector<std::size_t>::iterator it= row_indices.begin(); it != row_indices.end() ; it++)
{
A.push_back(i, *it, 0.00);
}
row_indices.clear();
}
#else
// Each thread handles a contiguous block of rows. The outer k loop plus
// the thread-id check makes the blocks be inserted one thread at a time,
// in row order -- presumably because push_back on the compressed matrix
// requires ordered insertion; confirm before restructuring.
int number_of_threads = omp_get_max_threads();
vector<unsigned int> matrix_partition;
CreatePartition(number_of_threads, indices.size(), matrix_partition);
for( int k=0; k < number_of_threads; ++k )
{
#pragma omp parallel
if( omp_get_thread_num() == k )
{
for( std::size_t i = matrix_partition[k]; i < matrix_partition[k+1]; i++ )
{
std::vector<std::size_t>& row_indices = indices[i];
std::sort(row_indices.begin(), row_indices.end());
for(std::vector<std::size_t>::iterator it= row_indices.begin(); it != row_indices.end() ; it++)
{
A.push_back(i, *it, 0.00);
}
row_indices.clear();
}
}
}
#endif
}
//**********AUXILIARY FUNCTION**************************************************************
//******************************************************************************************
/// Split number_of_rows into number_of_threads contiguous chunks;
/// [partitions[k], partitions[k+1]) is thread k's half-open row range.
/// The last chunk absorbs the division remainder.
inline void CreatePartition(unsigned int number_of_threads,const int number_of_rows, vector<unsigned int>& partitions)
{
partitions.resize(number_of_threads + 1);
int partition_size = number_of_rows / number_of_threads;
partitions[0] = 0;
partitions[number_of_threads] = number_of_rows;
for(unsigned int i = 1; i < number_of_threads; ++i)
partitions[i] = partitions[i-1] + partition_size ;
}
///@}
///@name Un accessible methods
///@{
/// Assignment operator.
IsogeometricPosterioriEstimator& operator=(IsogeometricPosterioriEstimator const& rOther)
{
return *this;
}
/// Copy constructor.
IsogeometricPosterioriEstimator(IsogeometricPosterioriEstimator const& rOther)
{
}
///@}
}; // Class IsogeometricPosterioriEstimator
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
/// input stream function
inline std::istream& operator >>(std::istream& rIStream, IsogeometricPosterioriEstimator& rThis)
{
return rIStream;
}
/// output stream function
inline std::ostream& operator <<(std::ostream& rOStream,
const IsogeometricPosterioriEstimator& rThis)
{
rThis.PrintInfo(rOStream);
rOStream << std::endl;
rThis.PrintData(rOStream);
return rOStream;
}
///@}
///@} addtogroup block
}// namespace Kratos.
#undef DEBUG_LEVEL1
#undef DEBUG_LEVEL2
#undef DEBUG_MULTISOLVE
#undef DEBUG_GENERATE_MESH
#undef ENABLE_PROFILING
#endif
|
core_zttlqt.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> c d s
*
**/
#include "core_blas.h"
#include "plasma_types.h"
#include "plasma_internal.h"
#include "core_lapack.h"
#include <omp.h>
// This will be swapped during the automatic code generation.
#undef REAL
#define COMPLEX
/***************************************************************************//**
*
* @ingroup core_ttlqt
*
* Computes an LQ factorization of a rectangular matrix
* formed by coupling side-by-side an m-by-m lower triangular tile A1
* and an m-by-n lower triangular tile A2:
*
* | A1 A2 | = L * Q
*
*
*******************************************************************************
*
* @param[in] m
* The number of rows of the tile A1 and A2. m >= 0.
* The number of columns of the tile A1.
*
* @param[in] n
* The number of columns of the tile A2. n >= 0.
*
* @param[in] ib
* The inner-blocking size. ib >= 0.
*
* @param[in,out] A1
* On entry, the m-by-m tile A1.
* On exit, the elements on and below the diagonal of the array
* contain the m-by-m lower trapezoidal tile L;
* the elements above the diagonal are not referenced.
*
* @param[in] lda1
* The leading dimension of the array A1. lda1 >= max(1,m).
*
* @param[in,out] A2
* On entry, the m-by-n lower triangular tile A2.
* On exit, the elements on and below the diagonal of the array
* with the matrix T represent
* the unitary tile Q as a product of elementary reflectors.
*
* @param[in] lda2
* The leading dimension of the array A2. lda2 >= max(1,m).
*
* @param[out] T
* The ib-by-m triangular factor T of the block reflector.
* T is upper triangular by block (economic storage);
* The rest of the array is not referenced.
*
* @param[in] ldt
* The leading dimension of the array T. ldt >= ib.
*
* @param tau
* Auxiliary workspace array of length m.
*
* @param work
* Auxiliary workspace array of length ib*m.
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
* @retval < 0 if -i, the i-th argument had an illegal value
*
******************************************************************************/
int core_zttlqt(int m, int n, int ib,
plasma_complex64_t *A1, int lda1,
plasma_complex64_t *A2, int lda2,
plasma_complex64_t *T, int ldt,
plasma_complex64_t *tau,
plasma_complex64_t *work)
{
// Check input arguments.
if (m < 0) {
coreblas_error("illegal value of m");
return -1;
}
if (n < 0) {
coreblas_error("illegal value of n");
return -2;
}
if (ib < 0) {
coreblas_error("illegal value of ib");
return -3;
}
if (A1 == NULL) {
coreblas_error("NULL A1");
return -4;
}
if (lda1 < imax(1, m) && m > 0) {
coreblas_error("illegal value of lda1");
return -5;
}
if (A2 == NULL) {
coreblas_error("NULL A2");
return -6;
}
if (lda2 < imax(1, m) && m > 0) {
coreblas_error("illegal value of lda2");
return -7;
}
if (T == NULL) {
coreblas_error("NULL T");
return -8;
}
if (ldt < imax(1, ib) && ib > 0) {
coreblas_error("illegal value of ldt");
return -9;
}
if (tau == NULL) {
coreblas_error("NULL tau");
return -10;
}
if (work == NULL) {
coreblas_error("NULL work");
return -11;
}
// quick return
if ((m == 0) || (n == 0) || (ib == 0))
return PlasmaSuccess;
// TODO: Need to check why some cases require this to avoid
// uninitialized values
//core_zlaset(PlasmaGeneral, ib, m, 0.0, 0.0, T, ldt);
// Blocked Householder LQ factorization of the coupled tile | A1 A2 |,
// processed ib rows (one panel) at a time.
for (int ii = 0; ii < m; ii += ib) {
int sb = imin(m-ii, ib);
// Factor the current panel row by row.
for (int i = 0; i < sb; i++) {
int j = ii + i;
int mi = sb-i-1;
int ni = imin( j + 1, n);
// Generate elementary reflector H(ii*ib+i) to annihilate
// A(ii*ib+i, ii*ib+i:m).
#ifdef COMPLEX
// zlarfg is written for column vectors: conjugate the row data
// first (undone after the panel update below).
LAPACKE_zlacgv_work(ni, &A2[j], lda2);
LAPACKE_zlacgv_work(1, &A1[lda1*j+j], lda1);
#endif
LAPACKE_zlarfg_work(ni+1, &A1[lda1*j+j], &A2[j], lda2, &tau[j]);
plasma_complex64_t alpha;
if (mi > 0) {
// Apply H(j-1) to A(j:ii+ib-1, j-1:m) from the right.
// work = A1 column segment ...
cblas_zcopy(
mi,
&A1[lda1*j+(j+1)], 1,
work, 1);
plasma_complex64_t zone = 1.0;
// ... plus the trailing A2 rows times the reflector row.
cblas_zgemv(
CblasColMajor, (CBLAS_TRANSPOSE)PlasmaNoTrans,
mi, ni,
CBLAS_SADDR(zone), &A2[j+1], lda2,
&A2[j], lda2,
CBLAS_SADDR(zone), work, 1);
alpha = -(tau[j]);
cblas_zaxpy(
mi, CBLAS_SADDR(alpha),
work, 1,
&A1[lda1*j+j+1], 1);
// Rank-1 update of the trailing rows of A2.
cblas_zgerc(
CblasColMajor, mi, ni,
CBLAS_SADDR(alpha), work, 1,
&A2[j], lda2,
&A2[j+1], lda2);
}
// Calculate T.
if (i > 0 ) {
int l = imin(i, imax(0, n-ii));
alpha = -(tau[j]);
core_zpemv(
PlasmaNoTrans, PlasmaRowwise,
i, imin(j, n), l,
alpha, &A2[ii], lda2,
&A2[j], lda2,
0.0, &T[ldt*j], 1,
work);
// T(0:i-1, j) = T(0:i-1, ii:j-1) * T(0:i-1, j)
cblas_ztrmv(
CblasColMajor, (CBLAS_UPLO)PlasmaUpper,
(CBLAS_TRANSPOSE)PlasmaNoTrans,
(CBLAS_DIAG)PlasmaNonUnit,
i, &T[ldt*ii], ldt,
&T[ldt*j], 1);
}
#ifdef COMPLEX
// Undo the conjugation applied before zlarfg.
LAPACKE_zlacgv_work(ni, &A2[j], lda2 );
LAPACKE_zlacgv_work(1, &A1[lda1*j+j], lda1 );
#endif
// Store the scalar factor on the diagonal of the T block.
T[ldt*j+i] = tau[j];
}
// Apply Q to the rest of the matrix to the right.
if (m > ii+sb) {
int mi = m-(ii+sb);
int ni = imin(ii+sb, n);
int l = imin(sb, imax(0, ni-ii));
core_zparfb(
PlasmaRight, PlasmaNoTrans,
PlasmaForward, PlasmaRowwise,
mi, ib, mi, ni, sb, l,
&A1[lda1*ii+ii+sb], lda1,
&A2[ii+sb], lda2,
&A2[ii], lda2,
&T[ldt*ii], ldt,
work, m);
}
}
return PlasmaSuccess;
}
/******************************************************************************/
void core_omp_zttlqt(int m, int n, int ib,
                     plasma_complex64_t *A1, int lda1,
                     plasma_complex64_t *A2, int lda2,
                     plasma_complex64_t *T, int ldt,
                     plasma_workspace_t work,
                     plasma_sequence_t *sequence, plasma_request_t *request)
{
    // OpenMP-task wrapper for core_zttlqt(): runs the kernel as a task that
    // reads/writes tiles A1 and A2 and produces the block-reflector tile T.
    #pragma omp task depend(inout:A1[0:lda1*m]) \
                     depend(inout:A2[0:lda2*n]) \
                     depend(out:T[0:ib*m]) // T should be mxib, but is stored
                                           // as ibxm
    {
        if (sequence->status == PlasmaSuccess) {
            // Prepare workspaces: the first m entries of this thread's
            // workspace hold tau, the rest is scratch for the kernel.
            int tid = omp_get_thread_num();
            plasma_complex64_t *tau = ((plasma_complex64_t*)work.spaces[tid]);
            // Call the kernel.
            int info = core_zttlqt(m, n, ib,
                                   A1, lda1,
                                   A2, lda2,
                                   T, ldt,
                                   tau,
                                   tau+m);
            if (info != PlasmaSuccess) {
                // Name the actual failing kernel (message previously said
                // "core_ztslqt", a different routine).
                plasma_error("core_zttlqt() failed");
                plasma_request_fail(sequence, request, PlasmaErrorInternal);
            }
        }
    }
}
|
ex5.c | #include <stdio.h>
#include <omp.h>
#include "timer.h"
int main(int argc, char **argv)
{
    /* Times N*M parallel counter increments.
     * Fixes relative to the original:
     *  - cont was read uninitialized by "cont + 1";
     *  - cont was shared and updated by all threads without synchronization
     *    (data race) -> reduction(+:cont);
     *  - j was shared, so every thread raced on the same inner loop index
     *    -> private(j). */
    const long N = 10000;
    const long M = 100000;
    double tempo, fim, inicio;
    int i, j, cont = 0;
    GET_TIME(inicio);
    #pragma omp parallel for private(j) reduction(+:cont)
    for (i = 0; i < N; i++)
        for(j = 0; j < M; j++)
            cont = cont + 1;
    GET_TIME(fim);
    tempo = fim - inicio;
    printf("Tempo: %.8lf\n", tempo);
    return 0;
}
|
hello.c | /*
1. how to include header
2. parallel region
3. runtime routine
4. undeterminated execution order
5. control number of threads
By C. Liao
*/
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif
int main(void)
{
/* i is private (each thread reports its own id); j stays 0 and shared.
Without OpenMP, the single "thread" prints id 0. */
int i=0, j=0;
#pragma omp parallel default(shared) private(i)
{
#ifdef _OPENMP
i=omp_get_thread_num()+j;
#endif
/* The order of the lines across threads is unspecified. */
printf("Hello,world! I am thread %d\n",i);
}
return 0;
}
|
openmp_wrapper.h | /*!
* Copyright (c) 2017 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifndef LIGHTGBM_OPENMP_WRAPPER_H_
#define LIGHTGBM_OPENMP_WRAPPER_H_
#ifdef _OPENMP
#include <LightGBM/utils/log.h>
#include <omp.h>
#include <exception>
#include <memory>
#include <mutex>
#include <stdexcept>
#include <vector>
inline int OMP_NUM_THREADS() {
  // Query the effective parallel team size: open a parallel region and let
  // exactly one thread (the master) record how many threads were spawned.
  int n_threads = 1;
#pragma omp parallel
#pragma omp master
  {
    n_threads = omp_get_num_threads();
  }
  return n_threads;
}
/*!
* \brief Collects the first exception thrown inside an OpenMP parallel
* region so it can be rethrown on the spawning thread.
*
* Exceptions must not escape an OpenMP worker; worker code catches them and
* calls CaptureException(), and the sequential code calls ReThrow() (via
* OMP_THROW_EX) after the region.
*/
class ThreadExceptionHelper {
 public:
  ThreadExceptionHelper() {
    ex_ptr_ = nullptr;
  }
  ~ThreadExceptionHelper() {
    // Last-resort rethrow in case OMP_THROW_EX() was forgotten. The
    // destructor is implicitly noexcept, so this terminates loudly rather
    // than silently dropping the captured exception.
    ReThrow();
  }
  void ReThrow() {
    if (ex_ptr_ != nullptr) {
      // Clear the stored pointer BEFORE throwing. Otherwise the destructor
      // would rethrow the same exception again during stack unwinding and
      // the program would call std::terminate() even in correct usage.
      std::exception_ptr tmp = nullptr;
      std::swap(tmp, ex_ptr_);
      std::rethrow_exception(tmp);
    }
  }
  void CaptureException() {
    // Only the first captured exception is kept. The check runs under the
    // lock: the previous unlocked pre-check raced against a concurrent
    // writer of ex_ptr_.
    std::unique_lock<std::mutex> guard(lock_);
    if (ex_ptr_ != nullptr) { return; }
    ex_ptr_ = std::current_exception();
  }

 private:
  std::exception_ptr ex_ptr_;  // first captured exception, nullptr if none
  std::mutex lock_;            // guards ex_ptr_ across worker threads
};
#define OMP_INIT_EX() ThreadExceptionHelper omp_except_helper
#define OMP_LOOP_EX_BEGIN() try {
#define OMP_LOOP_EX_END() \
} \
catch (std::exception & ex) { \
Log::Warning(ex.what()); \
omp_except_helper.CaptureException(); \
} \
catch (...) { \
omp_except_helper.CaptureException(); \
}
#define OMP_THROW_EX() omp_except_helper.ReThrow()
#else
/*
* To be compatible with openmp, define a nothrow macro which is used by gcc
* openmp, but not by clang.
* See also https://github.com/dmlc/dmlc-core/blob/main/include/dmlc/omp.h#L14
*/
#if defined(__clang__)
#undef __GOMP_NOTHROW
#define __GOMP_NOTHROW
#elif defined(__cplusplus)
#undef __GOMP_NOTHROW
#define __GOMP_NOTHROW throw()
#else
#undef __GOMP_NOTHROW
#define __GOMP_NOTHROW __attribute__((__nothrow__))
#endif
#ifdef _MSC_VER
#pragma warning(disable : 4068) // disable unknown pragma warning
#endif
#ifdef __cplusplus
extern "C" {
#endif
/** Fall here if no OPENMP support, so just
simulate a single thread running.
All #pragma omp should be ignored by the compiler **/
inline void omp_set_num_threads(int) __GOMP_NOTHROW {}
inline int omp_get_num_threads() __GOMP_NOTHROW {return 1;}
inline int omp_get_max_threads() __GOMP_NOTHROW {return 1;}
inline int omp_get_thread_num() __GOMP_NOTHROW {return 0;}
inline int OMP_NUM_THREADS() __GOMP_NOTHROW { return 1; }
#ifdef __cplusplus
} // extern "C"
#endif
#define OMP_INIT_EX()
#define OMP_LOOP_EX_BEGIN()
#define OMP_LOOP_EX_END()
#define OMP_THROW_EX()
#endif
#endif /* LIGHTGBM_OPENMP_WRAPPER_H_ */
|
shallow_water_residual_based_bdf_scheme.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Miguel Maso Sotomayor
//
#ifndef KRATOS_SHALLOW_WATER_RESIDUAL_BASED_BDF_SCHEME_H_INCLUDED
#define KRATOS_SHALLOW_WATER_RESIDUAL_BASED_BDF_SCHEME_H_INCLUDED
// System includes
// External includes
// Project includes
#include "shallow_water_application_variables.h"
#include "custom_utilities/flow_rate_slip_utility.h"
#include "solving_strategies/schemes/residual_based_bdf_scheme.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class ShallowWaterResidualBasedBDFScheme
* @ingroup KratosShallowWaterApplication
* @brief BDF integration scheme (for dynamic problems)
* @details The \f$n\f$ order Backward Differentiation Formula (BDF) method is a two step \f$n\f$ order accurate method.
* This scheme is designed to solve a system of the type:
* \f[
* \mathbf{M} \frac{du_{n0}}{dt} + \mathbf{K} u_{n0} = \mathbf{f}_{ext}
* \f]
* @author Miguel Maso Sotomayor
*/
template<class TSparseSpace, class TDenseSpace>
class ShallowWaterResidualBasedBDFScheme
: public ResidualBasedBDFScheme<TSparseSpace, TDenseSpace>
{
public:
///@name Type Definitions
///@{
KRATOS_CLASS_POINTER_DEFINITION( ShallowWaterResidualBasedBDFScheme );
typedef Scheme<TSparseSpace,TDenseSpace> BaseType;
typedef typename BaseType::Pointer BaseTypePointer;
typedef ResidualBasedBDFScheme<TSparseSpace,TDenseSpace> BDFBaseType;
typedef typename BDFBaseType::DofsArrayType DofsArrayType;
typedef typename BDFBaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BDFBaseType::TSystemVectorType TSystemVectorType;
typedef typename BDFBaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BDFBaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef ModelPart::NodesContainerType NodesArrayType;
typedef typename ModelPart::NodeType NodeType;
typedef FlowRateSlipUtility<LocalSystemMatrixType,LocalSystemVectorType,double>FlowRateSlipToolType;
///@}
///@name Life Cycle
///@{
// Constructor
/// @param Order BDF order (default 2)
/// @param UpdateVelocities when true, VELOCITY is recomputed from
/// MOMENTUM/HEIGHT at the end of every Update()
explicit ShallowWaterResidualBasedBDFScheme(const std::size_t Order = 2, bool UpdateVelocities = false)
: BDFBaseType(Order)
, mRotationTool()
, mUpdateVelocities(UpdateVelocities)
{}
// Copy Constructor
// NOTE(review): takes a non-const reference -- presumably required by the
// base class copy constructor; confirm before const-qualifying.
explicit ShallowWaterResidualBasedBDFScheme(ShallowWaterResidualBasedBDFScheme& rOther)
: BDFBaseType(rOther)
, mRotationTool()
, mUpdateVelocities(rOther.mUpdateVelocities)
{}
/**
* Clone
*/
BaseTypePointer Clone() override
{
// Copies this scheme through the (non-const) copy constructor above.
return BaseTypePointer( new ShallowWaterResidualBasedBDFScheme(*this) );
}
// Destructor
// Destructor: nothing to do explicitly.
~ShallowWaterResidualBasedBDFScheme() override {}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/**
* @brief Performing the update of the solution within newton iteration
* @param rModelPart The model of the problem to solve
* @param rDofSet Set of all primary variables
* @param rA LHS matrix
* @param rDx incremental update of primary variables
* @param rb RHS Vector
*/
void Update(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb
) override
{
KRATOS_TRY;
// Apply the DOF increment in the rotated (slip-aligned) frame, then
// rotate back to the global frame.
mRotationTool.RotateVelocities(rModelPart);
mpDofUpdater->UpdateDofs(rDofSet, rDx);
mRotationTool.RecoverVelocities(rModelPart);
// Recompute the BDF time derivatives from the updated unknowns.
BDFBaseType::UpdateDerivatives(rModelPart, rDofSet, rA, rDx, rb);
if (mUpdateVelocities) UpdateVelocities(rModelPart);
KRATOS_CATCH("ShallowWaterResidualBasedBDFScheme.Update");
}
/**
* @brief Performing the prediction of the solution
* @details It predicts the solution for the current step
* @param rModelPart The model of the problem to solve
* @param rDofSet set of all primary variables
* @param rA LHS matrix
* @param rDx Incremental update of primary variables
* @param rb RHS Vector
*/
void Predict(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb
) override
{
KRATOS_TRY;
const double delta_time = rModelPart.GetProcessInfo()[DELTA_TIME];
const int num_nodes = static_cast<int>( rModelPart.Nodes().size() );
const auto it_node_begin = rModelPart.Nodes().begin();
// Predicted unknowns with their matching first derivatives, component
// by component: (MOMENTUM_X, MOMENTUM_Y, HEIGHT) paired with
// (ACCELERATION_X, ACCELERATION_Y, VERTICAL_VELOCITY).
const std::array<const Variable<double>*, 3> var_components = {&MOMENTUM_X, &MOMENTUM_Y, &HEIGHT};
const std::array<const Variable<double>*, 3> accel_components = {&ACCELERATION_X, &ACCELERATION_Y, &VERTICAL_VELOCITY};
#pragma omp parallel for
for (int i = 0; i < num_nodes; ++i) {
auto it_node = it_node_begin + i;
for (std::size_t j = 0; j < 3; ++j)
{
// Explicit Euler predictor on the free DOFs only:
// u_n0 = u_n1 + dt * du_n1/dt
if (!it_node->IsFixed(*var_components[j])) {
double& un0 = it_node->FastGetSolutionStepValue(*var_components[j]);
double un1 = it_node->FastGetSolutionStepValue(*var_components[j], 1);
double dot_un1 = it_node->FastGetSolutionStepValue(*accel_components[j], 1);
un0 = un1 + delta_time * dot_un1;
}
}
// Make the first derivatives consistent with the predicted values.
UpdateFirstDerivative(it_node);
}
KRATOS_CATCH("ShallowWaterResidualBasedBDFScheme.Predict");
}
/**
* @brief This function is designed to be called in the builder and solver to introduce the selected time integration scheme.
* @param rCurrentElement The element to compute
* @param rLHS_Contribution The LHS matrix contribution
* @param rRHS_Contribution The RHS vector contribution
* @param rEquationId The ID's of the element degrees of freedom
* @param rCurrentProcessInfo The current process info instance
*/
void CalculateSystemContributions(
Element& rCurrentElement,
LocalSystemMatrixType& rLHS_Contribution,
LocalSystemVectorType& rRHS_Contribution,
Element::EquationIdVectorType& rEquationId,
const ProcessInfo& rCurrentProcessInfo
) override
{
// Let the base BDF scheme assemble the dynamic local system, then express
// it in the slip-aligned frame and impose the slip condition.
BDFBaseType::CalculateSystemContributions(
rCurrentElement,
rLHS_Contribution,
rRHS_Contribution,
rEquationId,
rCurrentProcessInfo);
mRotationTool.Rotate(rLHS_Contribution,rRHS_Contribution,rCurrentElement.GetGeometry());
mRotationTool.ApplySlipCondition(rLHS_Contribution,rRHS_Contribution,rCurrentElement.GetGeometry());
}
/**
* @brief This function is designed to calculate just the RHS contribution
* @param rCurrentElement The element to compute
* @param rRHS_Contribution The RHS vector contribution
* @param rEquationId The ID's of the element degrees of freedom
* @param rCurrentProcessInfo The current process info instance
*/
void CalculateRHSContribution(
Element& rCurrentElement,
LocalSystemVectorType& rRHS_Contribution,
Element::EquationIdVectorType& rEquationId,
const ProcessInfo& rCurrentProcessInfo
) override
{
// RHS-only variant: assemble via the base scheme, then rotate and apply
// the slip condition to the residual vector.
BDFBaseType::CalculateRHSContribution(
rCurrentElement,
rRHS_Contribution,
rEquationId,
rCurrentProcessInfo);
mRotationTool.Rotate(rRHS_Contribution,rCurrentElement.GetGeometry());
mRotationTool.ApplySlipCondition(rRHS_Contribution,rCurrentElement.GetGeometry());
}
/**
* @brief This function is designed to be called in the builder and solver to introduce the selected time integration scheme.
* @param rCurrentCondition The condition to compute
* @param rLHS_Contribution The LHS matrix contribution
* @param rRHS_Contribution The RHS vector contribution
* @param rEquationId The ID's of the element degrees of freedom
* @param rCurrentProcessInfo The current process info instance
*/
void CalculateSystemContributions(
Condition& rCurrentCondition,
LocalSystemMatrixType& rLHS_Contribution,
LocalSystemVectorType& rRHS_Contribution,
Element::EquationIdVectorType& rEquationId,
const ProcessInfo& rCurrentProcessInfo
) override
{
// Condition counterpart of the element overload: assemble through the
// base scheme, then rotate and impose the slip condition.
BDFBaseType::CalculateSystemContributions(
rCurrentCondition,
rLHS_Contribution,
rRHS_Contribution,
rEquationId,
rCurrentProcessInfo);
mRotationTool.Rotate(rLHS_Contribution,rRHS_Contribution,rCurrentCondition.GetGeometry());
mRotationTool.ApplySlipCondition(rLHS_Contribution,rRHS_Contribution,rCurrentCondition.GetGeometry());
}
/**
* @brief This function is designed to calculate just the RHS contribution
* @param rCurrentCondition The condition to compute
* @param rRHS_Contribution The RHS vector contribution
* @param rEquationId The ID's of the element degrees of freedom
* @param rCurrentProcessInfo The current process info instance
*/
void CalculateRHSContribution(
Condition& rCurrentCondition,
LocalSystemVectorType& rRHS_Contribution,
Element::EquationIdVectorType& rEquationId,
const ProcessInfo& rCurrentProcessInfo
) override
{
// RHS-only condition variant: assemble via the base scheme, then rotate
// and apply the slip condition to the residual vector.
BDFBaseType::CalculateRHSContribution(
rCurrentCondition,
rRHS_Contribution,
rEquationId,
rCurrentProcessInfo);
mRotationTool.Rotate(rRHS_Contribution,rCurrentCondition.GetGeometry());
mRotationTool.ApplySlipCondition(rRHS_Contribution,rCurrentCondition.GetGeometry());
}
/*
* @brief Free memory allocated by this class.
*/
void Clear() override
{
// Release the DOF updater's internal storage.
this->mpDofUpdater->Clear();
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override
{
// Fixed class identifier used for logging/printing.
return "ShallowWaterResidualBasedBDFScheme";
}
///@}
///@name Friends
///@{
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
typename TSparseSpace::DofUpdaterPointerType mpDofUpdater = TSparseSpace::CreateDofUpdater();
FlowRateSlipToolType mRotationTool;
bool mUpdateVelocities;
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/**
* @brief Updating first time derivative
* @param itNode the node interator
*/
void UpdateFirstDerivative(NodesArrayType::iterator itNode) override
{
    // First time derivatives stored at the node: d(MOMENTUM)/dt goes into
    // ACCELERATION, d(HEIGHT)/dt into VERTICAL_VELOCITY
    array_1d<double, 3>& r_momentum_rate = itNode->FastGetSolutionStepValue(ACCELERATION);
    double& r_height_rate = itNode->FastGetSolutionStepValue(VERTICAL_VELOCITY);

    // Leading BDF coefficient applied to the current step values
    noalias(r_momentum_rate) = BDFBaseType::mBDF[0] * itNode->FastGetSolutionStepValue(MOMENTUM);
    r_height_rate = BDFBaseType::mBDF[0] * itNode->FastGetSolutionStepValue(HEIGHT);

    // Accumulate the history terms of the BDF formula
    const std::size_t n_terms = BDFBaseType::mOrder + 1;
    for (std::size_t order = 1; order < n_terms; ++order) {
        noalias(r_momentum_rate) += BDFBaseType::mBDF[order] * itNode->FastGetSolutionStepValue(MOMENTUM, order);
        r_height_rate += BDFBaseType::mBDF[order] * itNode->FastGetSolutionStepValue(HEIGHT, order);
    }
}
/**
* @brief Updating second time derivative
* @param itNode the node interator
*/
void UpdateSecondDerivative(NodesArrayType::iterator itNode) override {} // intentionally empty: this scheme tracks no second time derivative
/**
* @brief Updating the velocities
* @param rModelPart The model part to compute
*/
void UpdateVelocities(ModelPart& rModelPart)
{
    // Recover the velocity field from the conserved variables: v = q / h,
    // evaluated in parallel over all nodes of the model part.
    block_for_each(rModelPart.Nodes(), [&](NodeType& r_node){
        auto& vel = r_node.FastGetSolutionStepValue(VELOCITY);
        const auto& q = r_node.FastGetSolutionStepValue(MOMENTUM); // discharge (momentum) vector
        const auto& h = r_node.FastGetSolutionStepValue(HEIGHT);   // water depth
        // NOTE(review): no dry-domain guard — h == 0 produces inf/nan here.
        // Confirm callers guarantee strictly positive HEIGHT before use.
        vel = q / h;
    });
}
/**
* @brief It adds the dynamic LHS contribution of the elements
* @param rLHS_Contribution The dynamic contribution for the LHS
* @param rD The damping matrix
* @param rM The mass matrix
* @param rCurrentProcessInfo The current process info instance
*/
void AddDynamicsToLHS(
    LocalSystemMatrixType& rLHS_Contribution,
    LocalSystemMatrixType& rD,
    LocalSystemMatrixType& rM,
    const ProcessInfo& rCurrentProcessInfo
    ) override
{
    // Adding mass contribution to the dynamic stiffness:
    // LHS += c0 * M, where c0 = mBDF[0] is the leading BDF coefficient.
    // rD is not used: no damping term is added by this scheme.
    if (rM.size1() != 0) { // if M matrix declared
        noalias(rLHS_Contribution) += rM * BDFBaseType::mBDF[0];
    }
}
/**
* @brief It adds the dynamic RHS contribution of the elements
* @param rElement The element to compute
* @param RHS_Contribution The dynamic contribution for the RHS
* @param D The damping matrix
* @param M The mass matrix
* @param rCurrentProcessInfo The current process info instance
*/
void AddDynamicsToRHS(
    Element& rElement,
    LocalSystemVectorType& rRHS_Contribution,
    LocalSystemMatrixType& rD,
    LocalSystemMatrixType& rM,
    const ProcessInfo& rCurrentProcessInfo
    ) override
{
    // Nothing to do when no mass matrix was assembled
    if (rM.size1() == 0) {
        return;
    }

    // Bind a const reference so the const overload of
    // GetFirstDerivativesVector is selected
    const auto& r_const_elem = rElement;
    const std::size_t thread_id = OpenMPUtils::ThisThread();

    // Inertia contribution: RHS -= M * (first derivatives vector)
    r_const_elem.GetFirstDerivativesVector(BDFBaseType::mVector.dotun0[thread_id], 0);
    noalias(rRHS_Contribution) -= prod(rM, BDFBaseType::mVector.dotun0[thread_id]);
}
/**
* @brief It adds the dynamic RHS contribution of the condition
* @param rCondition The condition to compute
* @param RHS_Contribution The dynamic contribution for the RHS
* @param D The damping matrix
* @param M The mass matrix
* @param rCurrentProcessInfo The current process info instance
*/
void AddDynamicsToRHS(
    Condition& rCondition,
    LocalSystemVectorType& rRHS_Contribution,
    LocalSystemMatrixType& rD,
    LocalSystemMatrixType& rM,
    const ProcessInfo& rCurrentProcessInfo
    ) override
{
    // Nothing to do when no mass matrix was assembled
    if (rM.size1() == 0) {
        return;
    }

    // Bind a const reference so the const overload of
    // GetFirstDerivativesVector is selected
    const auto& r_const_cond = rCondition;
    const std::size_t thread_id = OpenMPUtils::ThisThread();

    // Inertia contribution: RHS -= M * (first derivatives vector)
    r_const_cond.GetFirstDerivativesVector(BDFBaseType::mVector.dotun0[thread_id], 0);
    noalias(rRHS_Contribution) -= prod(rM, BDFBaseType::mVector.dotun0[thread_id]);
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
}; // Class ShallowWaterResidualBasedBDFScheme
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
///@}
} // Namespace Kratos
#endif // KRATOS_SHALLOW_WATER_RESIDUAL_BASED_BDF_SCHEME_H_INCLUDED defined
|
GB_unop__atan_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__atan_fp32_fp32
// op(A') function: GB_unop_tran__atan_fp32_fp32
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = atanf (aij)
/* Type and operator macros consumed by the apply kernel below and by the
   generic transpose template included from GB_unop_transpose.c. */

/* A (input) entry type */
#define GB_ATYPE \
    float

/* C (output) entry type */
#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    float aij = Ax [pA]

/* access entry p of the C array */
#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = atanf (x) ;

// casting
#define GB_CAST(z, aij) \
    float z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{ \
    /* aij = Ax [pA] */ \
    float aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = aij ; \
    Cx [pC] = atanf (z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ATAN || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__atan_fp32_fp32
(
    float *Cx,          // Cx and Ax may be aliased
    const float *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // Cx [p] = op (cast (Ax [p])); the float -> float cast is an
        // identity, so apply atanf directly
        Cx [p] = atanf (Ax [p]) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__atan_fp32_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the actual transpose loop lives in the shared template; the GB_*
    // macros defined above configure it for float -> float with z = atanf(x)
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
kernel_parallel.c | /******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved. *
* This file is part of the LIBXSMM library. *
* *
* For information on the license, see the LICENSE file. *
* Further information: https://github.com/libxsmm/libxsmm/ *
* SPDX-License-Identifier: BSD-3-Clause *
******************************************************************************/
/* Alexander Heinecke (Intel Corp.)
******************************************************************************/
#include <libxsmm.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <string.h>
# if defined(_OPENMP)
#include <omp.h>
# endif
# if defined(__APPLE__) && defined(__arm64__)
#include <pthread.h>
# endif
/* Full description of one (batch-reduce) GEMM test problem. */
typedef struct gemm_def {
  libxsmm_datatype in_type;    /* element type of A and B */
  libxsmm_datatype out_type;   /* element type of C */
  libxsmm_datatype comp_type;  /* type used for accumulation */
  libxsmm_blasint m;
  libxsmm_blasint n;
  libxsmm_blasint k;
  libxsmm_blasint lda;
  libxsmm_blasint ldb;
  libxsmm_blasint ldc;
  double alpha;                /* must be 1.0 (checked in main) */
  double beta;                 /* must be 0.0 or 1.0 (checked in main) */
  int trans_a;                 /* nonzero: A transposed (rejected by jit_matmul) */
  int trans_b;                 /* nonzero: B transposed */
  int vnni_a;                  /* nonzero: A stored in VNNI layout */
  int vnni_b;
  int vnni_c;
  int unsigned_a;              /* nonzero: treat A as unsigned (int kernels) */
  int unsigned_b;              /* nonzero: treat B as unsigned (int kernels) */
  int unsigned_c;              /* nonzero: unsigned C output, scaled by scf */
  int aligned_a;
  int aligned_c;
  int prefetch;                /* libxsmm_gemm_prefetch_type value */
  int br_type;                 /* 0: none, 1: address, 2: offset, 3: stride */
  libxsmm_blasint br_count;    /* number of batch-reduce terms */
  int br_unroll;               /* nonzero: hint full unroll of the BR loop */
  int tc_config;               /* nonzero: external tile configuration */
  float scf;                   /* scaling factor applied to unsigned i8 output */
} gemm_def;
/* Fills a br x (ld x n) buffer with random values drawn per element;
 * the element type is selected by dtype, unknown types leave the buffer
 * untouched. */
void init_random_matrix( libxsmm_datatype dtype, void* data, libxsmm_blasint br, libxsmm_blasint ld, libxsmm_blasint n ) {
  /* one typed view of the buffer per supported element type */
  double* l_d = (double*) data;
  float* l_f = (float*) data;
  libxsmm_bfloat16* l_bf = (libxsmm_bfloat16*) data;
  int* l_i32 = (int*) data;
  short* l_i16 = (short*) data;
  char* l_i8 = (char*) data;
  unsigned int l_b, l_row, l_col;

  for ( l_b = 0; l_b < br; l_b++ ) {
    for ( l_row = 0; l_row < ld; l_row++ ) {
      for ( l_col = 0; l_col < n; l_col++ ) {
        /* column-major position within batch l_b */
        const unsigned int l_pos = (l_b * ld * n) + (l_col * ld) + l_row;
        if ( dtype == LIBXSMM_DATATYPE_F64 ) {
          l_d[l_pos] = libxsmm_rng_f64();
        } else if ( dtype == LIBXSMM_DATATYPE_F32 ) {
          l_f[l_pos] = (float)libxsmm_rng_f64();
        } else if ( dtype == LIBXSMM_DATATYPE_BF16 ) {
          /* truncate an f32 to bf16 by keeping its high 16 bits */
          union libxsmm_bfloat16_hp l_tmp;
          l_tmp.f = (float)libxsmm_rng_f64();
          l_bf[l_pos] = l_tmp.i[1];
        } else if ( dtype == LIBXSMM_DATATYPE_I32 ) {
          l_i32[l_pos] = (int)  (libxsmm_rng_f64() * 20.0);
        } else if ( dtype == LIBXSMM_DATATYPE_I16 ) {
          l_i16[l_pos] = (short)(libxsmm_rng_f64() * 20.0);
        } else if ( dtype == LIBXSMM_DATATYPE_I8 ) {
          l_i8[l_pos]  = (char) (libxsmm_rng_f64() * 20.0);
        } else {
          /* unsupported type: intentionally leave the element untouched */
        }
      }
    }
  }
}
/* Zero-fills the whole br x (ld x n) buffer of the given element type. */
void init_zero_matrix( libxsmm_datatype dtype, void* data, libxsmm_blasint br, libxsmm_blasint ld, libxsmm_blasint n ) {
  memset( data, 0x0, br*ld*n*LIBXSMM_TYPESIZE(dtype) );
}
/* Fills the whole br x (ld x n) buffer with a non-zero garbage byte so that
 * reads of "uninitialized" memory are noticeable in the checks.
 * Note: memset() converts its value argument to unsigned char, so the
 * previous 0xdeadbeef constant was silently truncated to 0xef; the byte is
 * now written explicitly (byte pattern in memory is unchanged). */
void init_garbage_matrix( libxsmm_datatype dtype, void* data, libxsmm_blasint br, libxsmm_blasint ld, libxsmm_blasint n ) {
  memset( data, 0xef, br*ld*n*LIBXSMM_TYPESIZE(dtype) );
}
/* Scalar reference implementation of the batch-reduce GEMM:
 * C += sum_r A_r * B_r (C is zeroed first when beta == 0), dispatched on the
 * in/out/comp type combination of i_gemm_def. Matrices are column-major with
 * leading dimensions lda/ldb/ldc; integer and bf16 paths expect A in VNNI
 * layout with the k-blocking factor noted per branch. */
void ref_matmul( gemm_def* i_gemm_def, void* a, void* b, void* c ) {
  unsigned int l_r, l_j, l_i, l_s, l_k2;
  unsigned int lda = i_gemm_def->lda;
  unsigned int ldb = i_gemm_def->ldb;
  unsigned int ldc = i_gemm_def->ldc;
  unsigned int m = i_gemm_def->m;
  unsigned int n = i_gemm_def->n;
  unsigned int k = i_gemm_def->k;

  /* F64 = F64 * F64 */
  if ( (i_gemm_def->in_type == LIBXSMM_DATATYPE_F64) &&
       (i_gemm_def->out_type == LIBXSMM_DATATYPE_F64) &&
       (i_gemm_def->comp_type == LIBXSMM_DATATYPE_F64) ) {
    double* d_a = (double*)a;
    double* d_b = (double*)b;
    double* d_c = (double*)c;
    for (l_r = 0; l_r < i_gemm_def->br_count; l_r++) {
      for (l_j = 0; l_j < n; l_j++) {
        for (l_i = 0; l_i < m; l_i++) {
          /* beta == 0: zero C once, on the first batch-reduce term */
          if ( (i_gemm_def->beta == 0) && (l_r == 0) ) {
            d_c[(l_j * ldc) + l_i] = 0.0;
          }
          for (l_s = 0; l_s < k; l_s++) {
            if ( i_gemm_def->trans_b == 0 ) {
              d_c[(l_j * ldc) + l_i] += d_a[(l_r * lda * k) + ((l_s * lda) + l_i)] * d_b[(l_r * ldb * n) + ((l_j * ldb) + l_s)];
            } else {
              d_c[(l_j * ldc) + l_i] += d_a[(l_r * lda * k) + ((l_s * lda) + l_i)] * d_b[(l_r * ldb * k) + ((l_s * ldb) + l_j)];
            }
          }
        }
      }
    }
  /* F32 = F32 * F32 */
  } else if ( (i_gemm_def->in_type == LIBXSMM_DATATYPE_F32) &&
              (i_gemm_def->out_type == LIBXSMM_DATATYPE_F32) &&
              (i_gemm_def->comp_type == LIBXSMM_DATATYPE_F32) ) {
    float* f_a = (float*)a;
    float* f_b = (float*)b;
    float* f_c = (float*)c;
    for (l_r = 0; l_r < i_gemm_def->br_count; l_r++) {
      for (l_j = 0; l_j < n; l_j++) {
        for (l_i = 0; l_i < m; l_i++) {
          if ( (i_gemm_def->beta == 0) && (l_r == 0) ) {
            f_c[(l_j * ldc) + l_i] = 0.0;
          }
          for (l_s = 0; l_s < k; l_s++) {
            if ( i_gemm_def->trans_b == 0 ) {
              f_c[(l_j * ldc) + l_i] += f_a[(l_r * lda * k) + ((l_s * lda) + l_i)] * f_b[(l_r * ldb * n) + ((l_j * ldb) + l_s)];
            } else {
              f_c[(l_j * ldc) + l_i] += f_a[(l_r * lda * k) + ((l_s * lda) + l_i)] * f_b[(l_r * ldb * k) + ((l_s * ldb) + l_j)];
            }
          }
        }
      }
    }
  /* I32 = I16 * I16, A in VNNI layout with k-block 2.
   * NOTE(review): this and the following integer paths ignore trans_b. */
  } else if ( (i_gemm_def->in_type == LIBXSMM_DATATYPE_I16) &&
              (i_gemm_def->out_type == LIBXSMM_DATATYPE_I32) &&
              (i_gemm_def->comp_type == LIBXSMM_DATATYPE_I32) ) {
    short* s_a = (short*)a;
    short* s_b = (short*)b;
    int* i_c = (int*)c;
    int l_k_block = 2;
    for (l_r = 0; l_r < i_gemm_def->br_count; l_r++) {
      for (l_j = 0; l_j < n; l_j++) {
        for (l_i = 0; l_i < m; l_i++) {
          if ( (i_gemm_def->beta == 0) && (l_r == 0) ) {
            i_c[(l_j * ldc) + l_i] = 0;
          }
          for (l_s = 0; l_s < (k / l_k_block); l_s++) {
            for (l_k2 = 0; l_k2 < l_k_block; l_k2++) {
              i_c[(l_j * ldc) + l_i] += s_a[(l_r * lda * k) + (l_s * (lda*l_k_block)) + (l_i*l_k_block) + l_k2] *
                                        s_b[(l_r * ldb * n) + (l_j * ldb) + (l_s*l_k_block) + l_k2];
            }
          }
        }
      }
    }
  /* I32 = U8 * S8, A in VNNI layout with k-block 4 */
  } else if ( (i_gemm_def->in_type == LIBXSMM_DATATYPE_I8) &&
              (i_gemm_def->out_type == LIBXSMM_DATATYPE_I32) &&
              (i_gemm_def->comp_type == LIBXSMM_DATATYPE_I32) &&
              (i_gemm_def->unsigned_a == 1) && (i_gemm_def->unsigned_b == 0) ) {
    unsigned char* c_a = (unsigned char*)a;
    char* c_b = (char*)b;
    int* i_c = (int*)c;
    int l_k_block = 4;
    for (l_r = 0; l_r < i_gemm_def->br_count; l_r++) {
      for (l_j = 0; l_j < n; l_j++) {
        for (l_i = 0; l_i < m; l_i++) {
          if ( (i_gemm_def->beta == 0) && (l_r == 0) ) {
            i_c[(l_j * ldc) + l_i] = 0;
          }
          for (l_s = 0; l_s < (k / l_k_block); l_s++) {
            for (l_k2 = 0; l_k2 < l_k_block; l_k2++) {
              i_c[(l_j * ldc) + l_i] += c_a[(l_r * lda * k) + (l_s * (lda*l_k_block)) + (l_i*l_k_block) + l_k2] *
                                        c_b[(l_r * ldb * n) + (l_j * ldb) + (l_s*l_k_block) + l_k2];
            }
          }
        }
      }
    }
  /* I32 = S8 * U8, A in VNNI layout with k-block 4 */
  } else if ( (i_gemm_def->in_type == LIBXSMM_DATATYPE_I8) &&
              (i_gemm_def->out_type == LIBXSMM_DATATYPE_I32) &&
              (i_gemm_def->comp_type == LIBXSMM_DATATYPE_I32) &&
              (i_gemm_def->unsigned_a == 0) && (i_gemm_def->unsigned_b == 1) ) {
    char* c_a = (char*)a;
    unsigned char* c_b = (unsigned char*)b;
    int* i_c = (int*)c;
    int l_k_block = 4;
    for (l_r = 0; l_r < i_gemm_def->br_count; l_r++) {
      for (l_j = 0; l_j < n; l_j++) {
        for (l_i = 0; l_i < m; l_i++) {
          if ( (i_gemm_def->beta == 0) && (l_r == 0) ) {
            i_c[(l_j * ldc) + l_i] = 0;
          }
          for (l_s = 0; l_s < (k / l_k_block); l_s++) {
            for (l_k2 = 0; l_k2 < l_k_block; l_k2++) {
              i_c[(l_j * ldc) + l_i] += c_a[(l_r * lda * k) + (l_s * (lda*l_k_block)) + (l_i*l_k_block) + l_k2] *
                                        c_b[(l_r * ldb * n) + (l_j * ldb) + (l_s*l_k_block) + l_k2];
            }
          }
        }
      }
    }
  /* U8 = S8 * U8 with i32 accumulation, then scaled by scf and narrowed */
  } else if ( (i_gemm_def->in_type == LIBXSMM_DATATYPE_I8) &&
              (i_gemm_def->out_type == LIBXSMM_DATATYPE_I8) &&
              (i_gemm_def->comp_type == LIBXSMM_DATATYPE_I32) &&
              (i_gemm_def->unsigned_a == 0) && (i_gemm_def->unsigned_b == 1) && (i_gemm_def->unsigned_c == 1) ) {
    char* c_a = (char*)a;
    unsigned char* c_b = (unsigned char*)b;
    unsigned char* c_c = (unsigned char*)c;
    int l_k_block = 4;
    for (l_r = 0; l_r < i_gemm_def->br_count; l_r++) {
      for (l_j = 0; l_j < n; l_j++) {
        for (l_i = 0; l_i < m; l_i++) {
          int tmp;
          float ftmp;
          if ( (i_gemm_def->beta == 0) && (l_r == 0) ) {
            tmp = 0;
          } else {
            /* beta == 1: start from the current (unsigned) C value */
            tmp = (int)c_c[(l_j * ldc) + l_i];
          }
          for (l_s = 0; l_s < (k / l_k_block); l_s++) {
            for (l_k2 = 0; l_k2 < l_k_block; l_k2++) {
              tmp += c_a[(l_r * lda * k) + (l_s * (lda*l_k_block)) + (l_i*l_k_block) + l_k2] *
                     c_b[(l_r * ldb * n) + (l_j * ldb) + (l_s*l_k_block) + l_k2];
            }
          }
          /* requantize: scale the i32 accumulator and truncate to u8 */
          ftmp = (float)tmp;
          ftmp *= i_gemm_def->scf;
          c_c[(l_j * ldc) + l_i] = (unsigned char)ftmp;
        }
      }
    }
  /* F32 = BF16 * BF16; k-block 2 only when A is in VNNI layout */
  } else if ( (i_gemm_def->in_type == LIBXSMM_DATATYPE_BF16) &&
              (i_gemm_def->out_type == LIBXSMM_DATATYPE_F32) &&
              (i_gemm_def->comp_type == LIBXSMM_DATATYPE_F32) ) {
    libxsmm_bfloat16* h_a = (libxsmm_bfloat16*)a;
    libxsmm_bfloat16* h_b = (libxsmm_bfloat16*)b;
    float* f_c = (float*)c;
    int l_k_block = ( i_gemm_def->vnni_a != 0) ? 2 : 1;
    for (l_r = 0; l_r < i_gemm_def->br_count; l_r++) {
      for (l_j = 0; l_j < n; l_j++) {
        for (l_i = 0; l_i < m; l_i++) {
          if ( (i_gemm_def->beta == 0) && (l_r == 0) ) {
            f_c[(l_j * ldc) + l_i] = 0.0f;
          }
          for (l_s = 0; l_s < (k / l_k_block); l_s++) {
            for (l_k2 = 0; l_k2 < l_k_block; l_k2++) {
              /* widen each bf16 to f32 by placing it in the high 16 bits */
              union libxsmm_bfloat16_hp tmp_a_f;
              union libxsmm_bfloat16_hp tmp_b_f;
              tmp_a_f.i[0] = 0;
              tmp_a_f.i[1] = h_a[(l_r * lda * k) + (l_s * (lda*l_k_block)) + (l_i*l_k_block) + l_k2];
              tmp_b_f.i[0] = 0;
              tmp_b_f.i[1] = h_b[(l_r * ldb * n) + (l_j * ldb) + (l_s*l_k_block) + l_k2];
              f_c[(l_j * ldc) + l_i] += tmp_a_f.f * tmp_b_f.f;
            }
          }
        }
      }
    }
  /* BF16 = BF16 * BF16 with f32 accumulation, rounded back to bf16 */
  } else if ( (i_gemm_def->in_type == LIBXSMM_DATATYPE_BF16) &&
              (i_gemm_def->out_type == LIBXSMM_DATATYPE_BF16) &&
              (i_gemm_def->comp_type == LIBXSMM_DATATYPE_F32) ) {
    libxsmm_bfloat16* h_a = (libxsmm_bfloat16*)a;
    libxsmm_bfloat16* h_b = (libxsmm_bfloat16*)b;
    libxsmm_bfloat16* h_c = (libxsmm_bfloat16*)c;
    int l_k_block = ( i_gemm_def->vnni_a != 0) ? 2 : 1;
    /* acc/h_acc are reinitialized for every output element below */
    float acc = 0.0f;
    libxsmm_bfloat16 h_acc;
    for (l_r = 0; l_r < i_gemm_def->br_count; l_r++) {
      for (l_j = 0; l_j < n; l_j++) {
        for (l_i = 0; l_i < m; l_i++) {
          if ( (i_gemm_def->beta == 0) && (l_r == 0) ) {
            acc = 0.0f;
          } else {
            /* widen the current bf16 C value to f32 */
            union libxsmm_bfloat16_hp tmp;
            tmp.i[0] = 0;
            tmp.i[1] = h_c[(l_j * ldc) + l_i];
            acc = tmp.f;
          }
          for (l_s = 0; l_s < (k / l_k_block); l_s++) {
            for (l_k2 = 0; l_k2 < l_k_block; l_k2++) {
              union libxsmm_bfloat16_hp tmp_a_f;
              union libxsmm_bfloat16_hp tmp_b_f;
              tmp_a_f.i[0] = 0;
              tmp_a_f.i[1] = h_a[(l_r * lda * k) + (l_s * (lda*l_k_block)) + (l_i*l_k_block) + l_k2];
              tmp_b_f.i[0] = 0;
              tmp_b_f.i[1] = h_b[(l_r * ldb * n) + (l_j * ldb) + (l_s*l_k_block) + l_k2];
              acc += tmp_a_f.f * tmp_b_f.f;
            }
          }
          /* round-to-nearest-even conversion back to bf16 */
          libxsmm_rne_convert_fp32_bf16( &acc, &h_acc, 1 );
          h_c[(l_j * ldc) + l_i] = h_acc;
        }
      }
    }
  }
}
/* Compares an m x n result (leading dimension ld) against a gold reference
 * and returns the maximum absolute element-wise error. F64/F32 use
 * libxsmm_matdiff; BF16/I32/I8 compute the max-abs difference by hand.
 * Unknown types return the sentinel 100.0 so any tolerance check fails. */
double check_matrix( libxsmm_datatype dtype, void* data_gold, void* data, libxsmm_blasint ld, libxsmm_blasint m, libxsmm_blasint n ) {
  libxsmm_matdiff_info l_diff;
  double max_error = 0.0;

  libxsmm_matdiff_clear(&l_diff);
  if ( dtype == LIBXSMM_DATATYPE_F64 ) {
    libxsmm_matdiff(&l_diff, LIBXSMM_DATATYPE_F64, m, n, data_gold, data, &ld, &ld);
    max_error = l_diff.linf_abs;
  } else if ( dtype == LIBXSMM_DATATYPE_F32 ) {
    libxsmm_matdiff(&l_diff, LIBXSMM_DATATYPE_F32, m, n, data_gold, data, &ld, &ld);
    max_error = l_diff.linf_abs;
  } else if ( dtype == LIBXSMM_DATATYPE_BF16 ) {
    unsigned int l_i, l_j;
    libxsmm_bfloat16* h_data = (libxsmm_bfloat16*)data;
    libxsmm_bfloat16* h_data_gold = (libxsmm_bfloat16*)data_gold;
    for (l_i = 0; l_i < m; l_i++) {
      for (l_j = 0; l_j < n; l_j++) {
        /* widen both bf16 values to f32 (bf16 bits in the high half) */
        union libxsmm_bfloat16_hp tmp_c;
        union libxsmm_bfloat16_hp tmp_gold;
        double l_fabs;

        tmp_c.i[1] = h_data[(l_j * ld) + l_i];
        tmp_c.i[0] = 0;
        tmp_gold.i[1] = h_data_gold[(l_j * ld) + l_i];
        tmp_gold.i[0] = 0;
        l_fabs = fabs((double)tmp_gold.f - (double)tmp_c.f);
        if (max_error < l_fabs) max_error = l_fabs;
      }
    }
  } else if ( dtype == LIBXSMM_DATATYPE_I32 ) {
    unsigned int l_i, l_j;
    int* l_data = (int*)data;
    int* l_data_gold = (int*)data_gold;
    for (l_i = 0; l_i < m; l_i++) {
      for (l_j = 0; l_j < n; l_j++) {
        const double l_fabs = fabs((double)l_data_gold[(l_j * ld) + l_i] - (double)l_data[(l_j * ld) + l_i]);
        if (max_error < l_fabs) max_error = l_fabs;
      }
    }
  } else if ( dtype == LIBXSMM_DATATYPE_I8 ) {
    /* I8 results are compared as unsigned bytes (matches the SUI8UI8 path) */
    unsigned int l_i, l_j;
    unsigned char* l_data = (unsigned char*)data;
    unsigned char* l_data_gold = (unsigned char*)data_gold;
    for (l_i = 0; l_i < m; l_i++) {
      for (l_j = 0; l_j < n; l_j++) {
        const double l_fabs = fabs((double)l_data_gold[(l_j * ld) + l_i] - (double)l_data[(l_j * ld) + l_i]);
        if (max_error < l_fabs) max_error = l_fabs;
      }
    }
  } else {
    /* unsupported type: force the caller's tolerance check to fail */
    max_error = 100.0;
  }
  return max_error;
}
/**
 * Dispatches the requested (BR)GEMM kernel via LIBXSMM, runs it once for the
 * correctness check (writing o_c) and i_reps times for timing (writing
 * o_c_perf), and returns the wall-clock runtime of the timing loop.
 *
 * Fixes vs. previous revision:
 *  - the NULL check of i_gemm_def ran AFTER the descriptor had already been
 *    dereferenced by the malloc size expressions (dead code); it now runs
 *    before any dereference;
 *  - `test_jit.gemm_ext` in the br_type==3 correctness branch referenced an
 *    undeclared identifier (compile error under USE_GEMM_EXT_FRONTEND); it is
 *    `l_test_jit.gemm_ext` like every other call site;
 *  - the trans_a error path leaked the four BR helper arrays.
 *
 * @param i_gemm_def        problem descriptor (shapes, types, BR config, flags)
 * @param i_a               A operand (base pointer of the BR batch)
 * @param i_b               B operand (base pointer of the BR batch)
 * @param o_c               C output used for the correctness run
 * @param o_c_perf          C output used for the performance runs
 * @param i_reps            number of timed kernel invocations
 * @param i_print_jit_info  0: print JIT pointer and creation time
 * @return runtime of the performance loop in seconds (EXIT_FAILURE on error,
 *         kept for backward compatibility with existing callers)
 */
double jit_matmul( const gemm_def* i_gemm_def,
                   const void* i_a,
                   const void* i_b,
                   void* o_c,
                   void* o_c_perf,
                   const int i_reps,
                   const unsigned int i_print_jit_info ) {
  /* define function pointer */
  libxsmm_xmmfunction l_test_jit = { NULL };
  libxsmm_xmmfunction cfg_tr = { NULL };
  libxsmm_xmmfunction rls_tr = { NULL };
  libxsmm_timer_tickint l_start;
  libxsmm_mmkernel_info l_info;
  libxsmm_gemm_shape l_shape;
  libxsmm_gemm_batch_reduce_config l_brconfig;
  libxsmm_gemm_ext_unary_argops l_argops;
  libxsmm_gemm_ext_binary_postops l_postops;
  libxsmm_bitfield l_flags = LIBXSMM_GEMM_FLAGS('N', 'N');
  libxsmm_bitfield l_prefetch_flags = 0;
#if defined(USE_GEMM_EXT_FRONTEND)
  libxsmm_gemm_ext_param gemm_param;
#else
  libxsmm_gemm_param gemm_param;
#endif
  double l_jittime, l_runtime;
  size_t l_t, l_r;
  char** l_a_addr = NULL;
  char** l_b_addr = NULL;
  unsigned long long* l_a_offs = NULL;
  unsigned long long* l_b_offs = NULL;
  double l_beta;
  unsigned long long l_br;
  int l_cfg_flags = 0;
  int l_rls_flags = 0;

  /* validate the descriptor before it is dereferenced */
  if (0 == i_gemm_def) {
    fprintf(stderr, "JIT: unsupported descriptor arguments or data type!\n");
    return EXIT_FAILURE;
  }
  l_a_addr = (char**)malloc(i_gemm_def->br_count*sizeof(char*));
  l_b_addr = (char**)malloc(i_gemm_def->br_count*sizeof(char*));
  l_a_offs = (unsigned long long*)malloc(i_gemm_def->br_count*sizeof(unsigned long long));
  l_b_offs = (unsigned long long*)malloc(i_gemm_def->br_count*sizeof(unsigned long long));
  l_beta = i_gemm_def->beta;
  l_br = (unsigned long long)i_gemm_def->br_count;

  /* setup brgemm offsets */
  if ( i_gemm_def->br_type == 2 ) {
    for ( l_r = 0 ; l_r < i_gemm_def->br_count; l_r++ ) {
      l_a_offs[l_r] = l_r * (size_t)i_gemm_def->lda * (size_t)i_gemm_def->k * LIBXSMM_TYPESIZE(i_gemm_def->in_type);
      if (i_gemm_def->trans_b == 0) {
        l_b_offs[l_r] = l_r * (size_t)i_gemm_def->ldb * (size_t)i_gemm_def->n * LIBXSMM_TYPESIZE(i_gemm_def->in_type);
      } else {
        l_b_offs[l_r] = l_r * (size_t)i_gemm_def->ldb * (size_t)i_gemm_def->k * LIBXSMM_TYPESIZE(i_gemm_def->in_type);
      }
    }
  }

  /* set up the flags */
  if ( i_gemm_def->trans_b != 0 ) {
    l_flags |= LIBXSMM_GEMM_FLAG_TRANS_B;
  }
  if ( i_gemm_def->trans_a != 0 ) {
    fprintf(stderr, "trans_a needs to be 0\n");
    /* release the BR helper arrays before bailing out */
    free( (void*)l_a_addr );
    free( (void*)l_b_addr );
    free( (void*)l_a_offs );
    free( (void*)l_b_offs );
    return EXIT_FAILURE;
  }
  if ( i_gemm_def->vnni_a != 0 ) {
    l_flags |= LIBXSMM_GEMM_FLAG_VNNI_A;
  }
  if ( i_gemm_def->unsigned_a != 0 ) {
    l_flags |= LIBXSMM_GEMM_FLAG_A_UNSIGNED;
  }
  if ( i_gemm_def->unsigned_b != 0 ) {
    l_flags |= LIBXSMM_GEMM_FLAG_B_UNSIGNED;
  }
  l_flags |= (0 != i_gemm_def->aligned_a ? LIBXSMM_GEMM_FLAG_ALIGN_A : 0);
  l_flags |= (0 != i_gemm_def->aligned_c ? LIBXSMM_GEMM_FLAG_ALIGN_C : 0);
  l_flags |= ( l_beta == 0 ) ? LIBXSMM_GEMM_FLAG_BETA_0 : 0;

  /* setting update GEMM struct */
  l_shape = libxsmm_create_gemm_shape( i_gemm_def->m, i_gemm_def->n, i_gemm_def->k,
                                       i_gemm_def->lda, i_gemm_def->ldb, i_gemm_def->ldc,
                                       i_gemm_def->in_type, i_gemm_def->in_type, i_gemm_def->out_type, i_gemm_def->comp_type );

  /* setting BRGEMM config struct */
  if (i_gemm_def->br_type == 1) {
    l_brconfig.br_type = LIBXSMM_GEMM_BATCH_REDUCE_ADDRESS;
    l_brconfig.br_stride_a_hint = 0;
    l_brconfig.br_stride_b_hint = 0;
    l_brconfig.br_unroll_hint = ( i_gemm_def->br_unroll == 0 ) ? 0 : i_gemm_def->br_count;
  } else if (i_gemm_def->br_type == 2) {
    l_brconfig.br_type = LIBXSMM_GEMM_BATCH_REDUCE_OFFSET;
    l_brconfig.br_stride_a_hint = 0;
    l_brconfig.br_stride_b_hint = 0;
    l_brconfig.br_unroll_hint = ( i_gemm_def->br_unroll == 0 ) ? 0 : i_gemm_def->br_count;
  } else if (i_gemm_def->br_type == 3) {
    l_brconfig.br_type = LIBXSMM_GEMM_BATCH_REDUCE_STRIDE;
    l_brconfig.br_stride_a_hint = i_gemm_def->lda*i_gemm_def->k*LIBXSMM_TYPESIZE(i_gemm_def->in_type);
    l_brconfig.br_stride_b_hint = (i_gemm_def->trans_b == 0) ? i_gemm_def->ldb*i_gemm_def->n*LIBXSMM_TYPESIZE(i_gemm_def->in_type) : i_gemm_def->ldb*i_gemm_def->k*LIBXSMM_TYPESIZE(i_gemm_def->in_type);
    l_brconfig.br_unroll_hint = ( i_gemm_def->br_unroll == 0 ) ? 0 : i_gemm_def->br_count;
  } else {
    l_brconfig.br_type = LIBXSMM_GEMM_BATCH_REDUCE_NONE;
    l_brconfig.br_stride_a_hint = 0;
    l_brconfig.br_stride_b_hint = 0;
    l_brconfig.br_unroll_hint = 0;
  }

  /* setting prefetch flags */
  l_prefetch_flags = i_gemm_def->prefetch;

  /* setting ext structs to 0 */
  memset( &l_argops, 0, sizeof(libxsmm_gemm_ext_unary_argops) );
  memset( &l_postops, 0, sizeof(libxsmm_gemm_ext_binary_postops) );

  l_start = libxsmm_timer_tick();
  if (i_gemm_def->tc_config) {
    /* external tile configuration: JIT separate config/release kernels and
       suppress tile setup/reset inside the compute kernel */
    l_cfg_flags = LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG | l_flags;
    l_rls_flags = LIBXSMM_GEMM_FLAG_NO_SETUP_TILECONFIG | l_flags;
    l_flags |= (LIBXSMM_GEMM_FLAG_NO_SETUP_TILECONFIG | LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG);
    cfg_tr.gemm = libxsmm_dispatch_brgemm_v2( l_shape, l_cfg_flags, l_prefetch_flags, l_brconfig );
    rls_tr.gemm = libxsmm_dispatch_brgemm_v2( l_shape, l_rls_flags, l_prefetch_flags, l_brconfig );
  }
#if defined(USE_GEMM_EXT_FRONTEND)
  l_test_jit.gemm_ext = libxsmm_dispatch_brgemm_ext_v2( l_shape, l_flags, l_prefetch_flags, l_brconfig, l_argops, l_postops );
#else
  l_test_jit.gemm = libxsmm_dispatch_brgemm_v2( l_shape, l_flags, l_prefetch_flags, l_brconfig );
#endif
  l_jittime = libxsmm_timer_duration(l_start, libxsmm_timer_tick());
  if (l_test_jit.xmm == 0) {
    printf("JIT failed, please run with LIBXSMM_VERBOSE=-1 and/or with debug mode LIBXSMM library!\n");
    exit(EXIT_FAILURE);
  }

  /* receive kernel information */
  libxsmm_get_mmkernel_info(l_test_jit, &l_info);

  /* run external tileconfig */
  if (i_gemm_def->tc_config) {
    cfg_tr.gemm( NULL );
  }

  /* reset GEMM parameter */
#if defined(USE_GEMM_EXT_FRONTEND)
  memset( &gemm_param, 0, sizeof(libxsmm_gemm_ext_param) );
#else
  memset( &gemm_param, 0, sizeof(libxsmm_gemm_param) );
#endif
  gemm_param.op.tertiary = &l_br;
  gemm_param.c.primary = (void*)o_c;
  /* scaling factor only applies to the unsigned i8-output kernels */
  gemm_param.c.tertiary = (void*)(( i_gemm_def->unsigned_c != 0 ) ? &(i_gemm_def->scf) : NULL);

  /* run correctness */
  if (i_gemm_def->br_type == 0) {
    gemm_param.a.primary = (void*)i_a;
    gemm_param.b.primary = (void*)i_b;
    if ( l_info.prefetch != LIBXSMM_GEMM_PREFETCH_NONE ) {
      gemm_param.a.quaternary = (void*)i_a;
      gemm_param.b.quaternary = (void*)i_b;
      gemm_param.c.quaternary = (void*)o_c;
    }
#if defined(USE_GEMM_EXT_FRONTEND)
    l_test_jit.gemm_ext( &gemm_param );
#else
    l_test_jit.gemm( &gemm_param );
#endif
  } else if (i_gemm_def->br_type == 1) {
    gemm_param.a.primary = l_a_addr;
    gemm_param.b.primary = l_b_addr;
    for ( l_r = 0 ; l_r < i_gemm_def->br_count; l_r++ ) {
      l_a_addr[l_r] = (char*)i_a + (l_r * (size_t)i_gemm_def->lda * (size_t)i_gemm_def->k * LIBXSMM_TYPESIZE(i_gemm_def->in_type));
      if (i_gemm_def->trans_b == 0) {
        l_b_addr[l_r] = (char*)i_b + (l_r * (size_t)i_gemm_def->ldb * (size_t)i_gemm_def->n * LIBXSMM_TYPESIZE(i_gemm_def->in_type));
      } else {
        l_b_addr[l_r] = (char*)i_b + (l_r * (size_t)i_gemm_def->ldb * (size_t)i_gemm_def->k * LIBXSMM_TYPESIZE(i_gemm_def->in_type));
      }
    }
#if defined(USE_GEMM_EXT_FRONTEND)
    l_test_jit.gemm_ext( &gemm_param );
#else
    l_test_jit.gemm( &gemm_param );
#endif
  } else if (i_gemm_def->br_type == 2) {
    gemm_param.a.primary = (void*)i_a;
    gemm_param.a.secondary = l_a_offs;
    gemm_param.b.primary = (void*)i_b;
    gemm_param.b.secondary = l_b_offs;
#if defined(USE_GEMM_EXT_FRONTEND)
    l_test_jit.gemm_ext( &gemm_param );
#else
    l_test_jit.gemm( &gemm_param );
#endif
  } else if (i_gemm_def->br_type == 3) {
    gemm_param.a.primary = (void*)i_a;
    gemm_param.b.primary = (void*)i_b;
#if defined(USE_GEMM_EXT_FRONTEND)
    /* fixed: previously called undeclared `test_jit.gemm_ext` */
    l_test_jit.gemm_ext( &gemm_param );
#else
    l_test_jit.gemm( &gemm_param );
#endif
  }

  /* run performance */
  gemm_param.c.primary = (void*)o_c_perf;
  l_start = libxsmm_timer_tick();
  if (i_gemm_def->br_type == 0) {
    gemm_param.a.primary = (void*)i_a;
    gemm_param.b.primary = (void*)i_b;
    if ( l_info.prefetch != LIBXSMM_GEMM_PREFETCH_NONE ) {
      gemm_param.a.quaternary = (void*)i_a;
      gemm_param.b.quaternary = (void*)i_b;
      gemm_param.c.quaternary = (void*)o_c_perf;
    }
    for (l_t = 0; l_t < i_reps; l_t++) {
#if defined(USE_GEMM_EXT_FRONTEND)
      l_test_jit.gemm_ext( &gemm_param );
#else
      l_test_jit.gemm( &gemm_param );
#endif
    }
  } else if (i_gemm_def->br_type == 1) {
    gemm_param.a.primary = l_a_addr;
    gemm_param.b.primary = l_b_addr;
    for (l_t = 0; l_t < i_reps; l_t++) {
      for ( l_r = 0 ; l_r < i_gemm_def->br_count; l_r++ ) {
        l_a_addr[l_r] = (char*)i_a + (l_r * (size_t)i_gemm_def->lda * (size_t)i_gemm_def->k * LIBXSMM_TYPESIZE(i_gemm_def->in_type));
        if (i_gemm_def->trans_b == 0) {
          l_b_addr[l_r] = (char*)i_b + (l_r * (size_t)i_gemm_def->ldb * (size_t)i_gemm_def->n * LIBXSMM_TYPESIZE(i_gemm_def->in_type));
        } else {
          l_b_addr[l_r] = (char*)i_b + (l_r * (size_t)i_gemm_def->ldb * (size_t)i_gemm_def->k * LIBXSMM_TYPESIZE(i_gemm_def->in_type));
        }
      }
#if defined(USE_GEMM_EXT_FRONTEND)
      l_test_jit.gemm_ext( &gemm_param );
#else
      l_test_jit.gemm( &gemm_param );
#endif
    }
  } else if (i_gemm_def->br_type == 2) {
    gemm_param.a.primary = (void*)i_a;
    gemm_param.a.secondary = l_a_offs;
    gemm_param.b.primary = (void*)i_b;
    gemm_param.b.secondary = l_b_offs;
    for (l_t = 0; l_t < i_reps; l_t++) {
#if defined(USE_GEMM_EXT_FRONTEND)
      l_test_jit.gemm_ext( &gemm_param );
#else
      l_test_jit.gemm( &gemm_param );
#endif
    }
  } else if (i_gemm_def->br_type == 3) {
    gemm_param.a.primary = (void*)i_a;
    gemm_param.b.primary = (void*)i_b;
    for (l_t = 0; l_t < i_reps; l_t++) {
#if defined(USE_GEMM_EXT_FRONTEND)
      l_test_jit.gemm_ext( &gemm_param );
#else
      l_test_jit.gemm( &gemm_param );
#endif
    }
  }
  l_runtime = libxsmm_timer_duration(l_start, libxsmm_timer_tick());

  /* run external tilerelease */
  if (i_gemm_def->tc_config) {
    rls_tr.gemm( NULL );
  }

  if ( i_print_jit_info == 0 ) {
    printf("function pointer address: %llx\n", (unsigned long long)l_test_jit.xmm);
    printf("%fs for creating jit\n", l_jittime);
  }

  free( (void*)l_a_addr );
  free( (void*)l_b_addr );
  free( (void*)l_a_offs );
  free( (void*)l_b_offs );

  return l_runtime;
}
/* Prints the command-line usage for both invocation modes (fixes the
 * "space-sperated" typo in the user-facing help text). */
void print_help(void) {
  printf("\n\n");
  printf("1. Usage (dense*dense=dense, correctness and performance):\n");
  printf("    M\n");
  printf("    N\n");
  printf("    K\n");
  printf("    LDA\n");
  printf("    LDB\n");
  printf("    LDC\n");
  printf("    alpha: 1\n");
  printf("    beta: 0 or 1\n");
  printf("    0: unaligned A, otherwise aligned\n");
  printf("    0: unaligned C, otherwise aligned\n");
  printf("    0: A normal, 1: A trans\n");
  printf("    0: B normal, 1: B trans\n");
  printf("    PREFETCH: nopf (none), pfsigonly, BL2viaC, AL2, curAL2, AL2_BL2viaC, curAL2_BL2viaC\n");
  printf("    PRECISION: SP, DP, I16I32, USI8I32, SUI8I32, SUI8UI8, BF16F32, BF16, BF16F32_FLAT, BF16_FLAT\n");
  printf("    BRGEMM: nobr, addrbr, offsbr, strdbr\n");
  printf("    BRsize: 1 - N\n");
  printf("    BRunroll: 0/1\n");
  printf("    #repetitions\n");
  printf("    tile configuration: 1 - external, 0 - internal\n");
  printf("\n\n");
  printf("2. Usage (dense*dense=dense, performance only option available):\n");
  printf("    filename with space-separated sizes (M N K LDA LDB LDC)\n");
  printf("    alpha: 1\n");
  printf("    beta: 0 or 1\n");
  printf("    0: unaligned A, otherwise aligned\n");
  printf("    0: unaligned C, otherwise aligned\n");
  printf("    0: A normal, 1: A trans\n");
  printf("    0: B normal, 1: B trans\n");
  printf("    PRECISION: SP, DP, I16I32, USI8I32, SUI8I32, SUI8UI8, BF16F32, BF16, BF16F32_FLAT, BF16_FLAT\n");
  printf("    BRGEMM: nobr, addrbr, offsbr, strdbr\n");
  printf("    BRsize: 1 - N\n");
  printf("    BRunroll: 0/1\n");
  printf("    #repetitions\n");
  printf("    0: no check, otherwise: run check\n");
  printf("    tile configuration: 1 - external, 0 - internal\n");
  printf("\n\n");
}
int main(int argc, char* argv []) {
char* l_precision = NULL;
libxsmm_blasint l_lda = 0, l_ldb = 0, l_ldc = 0;
libxsmm_blasint l_m = 0, l_n = 0, l_k = 0;
int l_aligned_a = 0;
int l_aligned_c = 0;
int l_trans_a = 0;
int l_trans_b = 0;
double l_alpha = 0;
double l_beta = 0;
int l_br = 1;
int l_br_type = 0;
int l_br_unroll = 0;
double l_runtime_libxsmm = 0;
int l_file_input = 0;
char* l_file_name = NULL;
FILE *l_file_handle = NULL;
int l_run_check = 0;
double l_total_max_error = 0.0;
int l_tc_config = 0;
int l_reps;
libxsmm_gemm_prefetch_type l_prefetch = LIBXSMM_GEMM_PREFETCH_NONE;
gemm_def l_gemm_def;
int l_n_threads = 1;
# if defined(__APPLE__) && defined(__arm64__)
# if 1
pthread_set_qos_class_self_np( QOS_CLASS_USER_INTERACTIVE, 0 );
# else
pthread_set_qos_class_self_np( QOS_CLASS_BACKGROUND, 0 );
# endif
# endif
/* check argument count for a valid range */
if ( argc == 20 || argc == 19 ) {
/* xgemm sizes */
l_m = atoi(argv[1]);
l_n = atoi(argv[2]);
l_k = atoi(argv[3]);
l_lda = atoi(argv[4]);
l_ldb = atoi(argv[5]);
l_ldc = atoi(argv[6]);
/* some sugar */
l_alpha = atof(argv[7]);
l_beta = atof(argv[8]);
l_aligned_a = atoi(argv[9]);
l_aligned_c = atoi(argv[10]);
l_trans_a = atoi(argv[11]);
l_trans_b = atoi(argv[12]);
/* arch specific stuff */
l_precision = argv[14];
l_br = atoi(argv[16]);
l_br_unroll = atoi(argv[17]);
l_reps = atoi(argv[18]);
if ( argc == 20 ) {
l_tc_config = atoi(argv[19]);
} else {
l_tc_config = 0;
}
/* set value of prefetch flag */
if (strcmp("nopf", argv[13]) == 0) {
l_prefetch = LIBXSMM_GEMM_PREFETCH_NONE;
}
else if (strcmp("pfsigonly", argv[13]) == 0) {
l_prefetch = LIBXSMM_GEMM_PREFETCH_SIGONLY;
}
else if (strcmp("BL2viaC", argv[13]) == 0) {
l_prefetch = LIBXSMM_GEMM_PREFETCH_BL2_VIA_C;
}
else if (strcmp("curAL2", argv[13]) == 0) {
l_prefetch = LIBXSMM_GEMM_PREFETCH_AL2_AHEAD;
}
else if (strcmp("curAL2_BL2viaC", argv[13]) == 0) {
l_prefetch = LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C_AHEAD;
}
else if (strcmp("AL2", argv[13]) == 0) {
l_prefetch = LIBXSMM_GEMM_PREFETCH_AL2;
}
else if (strcmp("AL2_BL2viaC", argv[13]) == 0) {
l_prefetch = LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C;
}
else {
print_help();
return EXIT_FAILURE;
}
if (strcmp("nobr", argv[15]) == 0) {
l_br_type = 0;
}
else if (strcmp("addrbr", argv[15]) == 0) {
l_br_type = 1;
}
else if (strcmp("offsbr", argv[15]) == 0) {
l_br_type = 2;
}
else if (strcmp("strdbr", argv[15]) == 0) {
l_br_type = 3;
}
else {
print_help();
return EXIT_FAILURE;
}
l_file_input = 0;
l_run_check = 1;
} else if ( argc == 15 || argc == 14 ) {
l_file_input = 1;
l_file_name = argv[1];
l_alpha = atof(argv[2]);
l_beta = atof(argv[3]);
l_aligned_a = atoi(argv[4]);
l_aligned_c = atoi(argv[5]);
l_trans_a = atoi(argv[6]);
l_trans_b = atoi(argv[7]);
l_precision = argv[8];
l_br = atoi(argv[10]);
l_br_unroll = atoi(argv[11]);
if ( argc == 15 ) {
l_tc_config = atoi(argv[14]);
} else {
l_tc_config = 0;
}
if (strcmp("nobr", argv[9]) == 0) {
l_br_type = 0;
}
else if (strcmp("addrbr", argv[9]) == 0) {
l_br_type = 1;
}
else if (strcmp("offsbr", argv[9]) == 0) {
l_br_type = 2;
}
else if (strcmp("strdbr", argv[9]) == 0) {
l_br_type = 3;
}
else {
print_help();
return EXIT_FAILURE;
}
l_reps = atoi(argv[12]);
l_run_check = atoi(argv[13]);
l_prefetch = LIBXSMM_GEMM_PREFETCH_NONE;
} else {
print_help();
return EXIT_FAILURE;
}
const char *env_arch = getenv("LIBXSMM_TARGET");
const int is_env_SPR = (
env_arch == libxsmm_stristr(env_arch, "spr") ||
env_arch == libxsmm_stristr(env_arch, "amx"));
int arch_cpuid = libxsmm_cpuid();
if ((!is_env_SPR && arch_cpuid < LIBXSMM_X86_AVX512_SPR)
&& (l_tc_config)) {
printf("Warning: external tile configuration will be ingnored\n");
l_tc_config = 0;
}
l_br = (l_br < 1) ? 1 : l_br;
l_br = (l_br_type == 0) ? 1 : l_br;
l_br_unroll = (l_br_type == 0) ? 0 : l_br_unroll;
/* check alpha */
if ( LIBXSMM_NEQ(l_alpha, 1.0) ) {
fprintf(stderr, "JIT: alpha needs to be 1.0!\n");
exit(EXIT_FAILURE);
}
/* check beta */
if ( LIBXSMM_NEQ(l_beta, 0.0) && LIBXSMM_NEQ(l_beta, 1.0) ) {
fprintf(stderr, "JIT: beta needs to be 0.0 or 1.0!\n");
exit(EXIT_FAILURE);
}
/* setting static GEMM parameters */
l_gemm_def.alpha = l_alpha;
l_gemm_def.beta = l_beta;
l_gemm_def.trans_a = l_trans_a;
l_gemm_def.trans_b = l_trans_b;
l_gemm_def.vnni_a = 0;
l_gemm_def.vnni_b = 0;
l_gemm_def.vnni_c = 0;
l_gemm_def.unsigned_a = 0;
l_gemm_def.unsigned_b = 0;
l_gemm_def.unsigned_c = 0;
l_gemm_def.aligned_a = l_aligned_a;
l_gemm_def.aligned_c = l_aligned_c;
l_gemm_def.prefetch = l_prefetch;
l_gemm_def.br_type = l_br_type;
l_gemm_def.br_count = l_br;
l_gemm_def.br_unroll = l_br_unroll;
l_gemm_def.tc_config = l_tc_config;
l_gemm_def.scf = 0.0;
/* setting precision in GEMM struct */
if ( (strcmp(l_precision, "DP") == 0) ) {
l_gemm_def.in_type = LIBXSMM_DATATYPE_F64;
l_gemm_def.out_type = LIBXSMM_DATATYPE_F64;
l_gemm_def.comp_type = LIBXSMM_DATATYPE_F64;
} else if ( (strcmp(l_precision, "SP") == 0) ) {
l_gemm_def.in_type = LIBXSMM_DATATYPE_F32;
l_gemm_def.out_type = LIBXSMM_DATATYPE_F32;
l_gemm_def.comp_type = LIBXSMM_DATATYPE_F32;
} else if ( (strcmp(l_precision, "I16I32") == 0) ) {
l_gemm_def.in_type = LIBXSMM_DATATYPE_I16;
l_gemm_def.out_type = LIBXSMM_DATATYPE_I32;
l_gemm_def.comp_type = LIBXSMM_DATATYPE_I32;
l_gemm_def.vnni_a = 1;
l_gemm_def.trans_a = 0;
l_gemm_def.trans_b = 0;
} else if (strcmp(l_precision, "USI8I32") == 0) {
l_gemm_def.in_type = LIBXSMM_DATATYPE_I8;
l_gemm_def.out_type = LIBXSMM_DATATYPE_I32;
l_gemm_def.comp_type = LIBXSMM_DATATYPE_I32;
l_gemm_def.vnni_a = 1;
l_gemm_def.trans_a = 0;
l_gemm_def.trans_b = 0;
l_gemm_def.unsigned_a = 1;
} else if (strcmp(l_precision, "SUI8I32") == 0) {
l_gemm_def.in_type = LIBXSMM_DATATYPE_I8;
l_gemm_def.out_type = LIBXSMM_DATATYPE_I32;
l_gemm_def.comp_type = LIBXSMM_DATATYPE_I32;
l_gemm_def.vnni_a = 1;
l_gemm_def.trans_a = 0;
l_gemm_def.trans_b = 0;
l_gemm_def.unsigned_b = 1;
} else if (strcmp(l_precision, "SUI8UI8") == 0) {
l_gemm_def.in_type = LIBXSMM_DATATYPE_I8;
l_gemm_def.out_type = LIBXSMM_DATATYPE_I32;
l_gemm_def.comp_type = LIBXSMM_DATATYPE_I32;
l_gemm_def.vnni_a = 1;
l_gemm_def.trans_a = 0;
l_gemm_def.trans_b = 0;
l_gemm_def.unsigned_b = 1;
l_gemm_def.unsigned_c = 1;
l_gemm_def.scf = 1.0f;
} else if (strcmp(l_precision, "BF16F32") == 0) {
l_gemm_def.in_type = LIBXSMM_DATATYPE_BF16;
l_gemm_def.out_type = LIBXSMM_DATATYPE_F32;
l_gemm_def.comp_type = LIBXSMM_DATATYPE_F32;
l_gemm_def.vnni_a = 1;
l_gemm_def.trans_a = 0;
l_gemm_def.trans_b = 0;
} else if (strcmp(l_precision, "BF16") == 0) {
l_gemm_def.in_type = LIBXSMM_DATATYPE_BF16;
l_gemm_def.out_type = LIBXSMM_DATATYPE_BF16;
l_gemm_def.comp_type = LIBXSMM_DATATYPE_F32;
l_gemm_def.vnni_a = 1;
l_gemm_def.trans_a = 0;
l_gemm_def.trans_b = 0;
} else if (strcmp(l_precision, "BF16F32_FLAT") == 0) {
l_gemm_def.in_type = LIBXSMM_DATATYPE_BF16;
l_gemm_def.out_type = LIBXSMM_DATATYPE_F32;
l_gemm_def.comp_type = LIBXSMM_DATATYPE_F32;
} else if (strcmp(l_precision, "BF16_FLAT") == 0) {
l_gemm_def.in_type = LIBXSMM_DATATYPE_BF16;
l_gemm_def.out_type = LIBXSMM_DATATYPE_BF16;
l_gemm_def.comp_type = LIBXSMM_DATATYPE_F32;
} else {
fprintf(stderr, "Unsupported precision %s!\n", l_precision);
exit(EXIT_FAILURE);
}
if ( l_file_input != 0 ) {
l_file_handle = fopen( l_file_name, "r" );
} else {
if ( l_trans_b == 0 ) {
printf("------------------------------------------------\n");
printf("RUNNING (%ix%i) X (%ix%i) = (%ix%i), %s, BR=%i\n", l_m, l_k, l_k, l_n, l_m, l_n, l_precision, l_br);
printf("------------------------------------------------\n");
} else {
printf("------------------------------------------------\n");
printf("RUNNING (%ix%i) X (%ix%i)^T = (%ix%i), %s, BR=%i\n", l_m, l_k, l_k, l_n, l_m, l_n, l_precision, l_br);
printf("------------------------------------------------\n");
}
}
/* read the number of threads */
#if defined(_OPENMP)
#pragma omp parallel
{
#pragma omp master
{
l_n_threads = omp_get_num_threads();
}
}
#endif
unsigned int l_keep_going = 0;
do {
double error = 0.0;
if ( l_file_input != 0 ) {
char l_line[512];
if ( fgets( l_line, 512, l_file_handle) == NULL ) {
l_keep_going = 0;
break;
} else {
l_keep_going = 1;
}
if ( 6 != sscanf( l_line, "%i %i %i %i %i %i", &l_m, &l_n, &l_k, &l_lda, &l_ldb, &l_ldc ) ) exit(EXIT_FAILURE);
}
l_gemm_def.m = l_m;
l_gemm_def.n = l_n;
l_gemm_def.k = l_k;
l_gemm_def.lda = l_lda;
l_gemm_def.ldb = l_ldb;
l_gemm_def.ldc = l_ldc;
l_runtime_libxsmm = 0;
#if defined(_OPENMP)
#pragma omp parallel reduction(+:l_runtime_libxsmm)
#endif
{
char *l_a, *l_b, *l_c, *l_c_perf, *l_c_gold;
l_a = (char*)libxsmm_aligned_malloc((size_t)l_lda * (size_t)l_k * (size_t)l_br * LIBXSMM_TYPESIZE(l_gemm_def.in_type), 64);
if (l_gemm_def.trans_b == 0) {
l_b = (char*)libxsmm_aligned_malloc((size_t)l_ldb * (size_t)l_n * (size_t)l_br * LIBXSMM_TYPESIZE(l_gemm_def.in_type), 64);
} else {
l_b = (char*)libxsmm_aligned_malloc((size_t)l_ldb * (size_t)l_k * (size_t)l_br * LIBXSMM_TYPESIZE(l_gemm_def.in_type), 64);
}
l_c = (char*)libxsmm_aligned_malloc((size_t)l_ldc * (size_t)l_n * LIBXSMM_TYPESIZE(l_gemm_def.out_type), 64);
l_c_perf = (char*)libxsmm_aligned_malloc((size_t)l_ldc * (size_t)l_n * LIBXSMM_TYPESIZE(l_gemm_def.out_type), 64);
l_c_gold = (char*)libxsmm_aligned_malloc((size_t)l_ldc * (size_t)l_n * LIBXSMM_TYPESIZE(l_gemm_def.out_type), 64);
init_random_matrix( l_gemm_def.in_type, l_a, l_br, l_lda, l_k );
if (l_gemm_def.trans_b == 0) {
init_random_matrix( l_gemm_def.in_type, l_b, l_br, l_ldb, l_n );
} else {
init_random_matrix( l_gemm_def.in_type, l_b, l_br, l_ldb, l_k );
}
if ( l_beta == 0 ) {
init_garbage_matrix( l_gemm_def.out_type, l_c, 1, l_ldc, l_n );
init_garbage_matrix( l_gemm_def.out_type, l_c_perf, 1, l_ldc, l_n );
init_garbage_matrix( l_gemm_def.out_type, l_c_gold, 1, l_ldc, l_n );
} else {
init_zero_matrix( l_gemm_def.out_type, l_c, 1, l_ldc, l_n );
init_zero_matrix( l_gemm_def.out_type, l_c_perf, 1, l_ldc, l_n );
init_zero_matrix( l_gemm_def.out_type, l_c_gold, 1, l_ldc, l_n );
}
/* run gold solution */
#pragma omp master
{
ref_matmul( &l_gemm_def, l_a, l_b, l_c_gold );
}
/* run LIBXSMM solution */
l_runtime_libxsmm = jit_matmul( &l_gemm_def, l_a, l_b, l_c, l_c_perf, l_reps, l_file_input );
/* run compare */
#pragma omp master
{
error = check_matrix( l_gemm_def.out_type, l_c_gold, l_c, l_ldc, l_m, l_n );
}
libxsmm_free(l_a);
libxsmm_free(l_b);
libxsmm_free(l_c);
libxsmm_free(l_c_perf);
libxsmm_free(l_c_gold);
}
l_runtime_libxsmm /= (double)l_n_threads;
if ( l_file_input == 0 ) {
printf("%fs for libxsmm\n", l_runtime_libxsmm);
printf("%f GFLOPS for libxsmm\n", ((double)((double)l_reps * (double)l_m * (double)l_n * (double)l_k * (double)l_br * (double)l_n_threads) * 2.0) / (l_runtime_libxsmm * 1.0e9));
printf("max. error: %f\n", error);
} else {
if ( l_run_check == 1 ) {
printf("%i %i %i %i %i %i %i %i %i %s %f %f\n", l_m, l_n, l_k, l_lda, l_ldb, l_ldc, l_br, l_br_type, l_br_unroll, l_precision, ((double)((double)l_reps * (double)l_m * (double)l_n * (double)l_k * (double)l_br * (double)l_n_threads) * 2.0) / (l_runtime_libxsmm * 1.0e9), error );
} else {
printf("%i %i %i %i %i %i %i %i %i %s %f\n", l_m, l_n, l_k, l_lda, l_ldb, l_ldc, l_br, l_br_type, l_br_unroll, l_precision, ((double)((double)l_reps * (double)l_m * (double)l_n * (double)l_k * (double)l_br * (double)l_n_threads) * 2.0) / (l_runtime_libxsmm * 1.0e9) );
}
}
if ( (l_total_max_error < error) && (l_run_check == 1) ) {
l_total_max_error = error;
}
} while ( l_keep_going );
if ( l_file_input != 0 ) {
fclose( l_file_handle );
} else {
printf("------------------------------------------------\n");
}
/* Print total max error */
printf("\n\n Total Max Error %f\n\n", l_total_max_error );
if ( l_total_max_error >= 0.00005 && l_br_type == 0) {
return EXIT_FAILURE;
} else if ( l_total_max_error >= 0.0005 && l_br_type > 0) {
return EXIT_FAILURE;
} else {
return EXIT_SUCCESS;
}
}
|
polybench.h | /**
* polybench.h: This file is part of the PolyBench/C 3.2 test suite.
*
*
* Contact: Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://polybench.sourceforge.net
*/
/*
* Polybench header for instrumentation.
*
* Programs must be compiled with `-I utilities utilities/polybench.c'
*
* Optionally, one can define:
*
* -DPOLYBENCH_TIME, to report the execution time,
* OR (exclusive):
* -DPOLYBENCH_PAPI, to use PAPI H/W counters (defined in polybench.c)
*
*
* See README or utilities/polybench.c for additional options.
*
*/
#ifndef POLYBENCH_H
# define POLYBENCH_H
# include <stdlib.h>
/* Array padding. By default, none is used. */
# ifndef POLYBENCH_PADDING_FACTOR
/* default: */
# define POLYBENCH_PADDING_FACTOR 0
# endif
/* C99 arrays in function prototype. By default, do not use. */
# ifdef POLYBENCH_USE_C99_PROTO
# define POLYBENCH_C99_SELECT(x,y) y
# else
/* default: */
# define POLYBENCH_C99_SELECT(x,y) x
# endif
/* Scalar loop bounds in SCoPs. By default, use parametric loop bounds. */
# ifdef POLYBENCH_USE_SCALAR_LB
# define POLYBENCH_LOOP_BOUND(x,y) x
# else
/* default: */
# define POLYBENCH_LOOP_BOUND(x,y) y
# endif
/* Macros to reference an array. Generic for heap and stack arrays
(C99). Each array dimensionality has his own macro, to be used at
declaration or as a function argument.
Example:
int b[x] => POLYBENCH_1D_ARRAY(b, x)
int A[N][N] => POLYBENCH_2D_ARRAY(A, N, N)
*/
# ifndef POLYBENCH_STACK_ARRAYS
# define POLYBENCH_ARRAY(x) *x
# define POLYBENCH_FREE_ARRAY(x) free((void*)x);
# define POLYBENCH_DECL_VAR(x) (*x)
# else
# define POLYBENCH_ARRAY(x) x
# define POLYBENCH_FREE_ARRAY(x)
# define POLYBENCH_DECL_VAR(x) x
# endif
/* Macros for using arrays in the function prototypes. */
# define POLYBENCH_1D(var, dim1,ddim1) var[POLYBENCH_C99_SELECT(dim1,ddim1) + POLYBENCH_PADDING_FACTOR]
# define POLYBENCH_2D(var, dim1, dim2, ddim1, ddim2) var[POLYBENCH_C99_SELECT(dim1,ddim1) + POLYBENCH_PADDING_FACTOR][POLYBENCH_C99_SELECT(dim2,ddim2) + POLYBENCH_PADDING_FACTOR]
# define POLYBENCH_3D(var, dim1, dim2, dim3, ddim1, ddim2, ddim3) var[POLYBENCH_C99_SELECT(dim1,ddim1) + POLYBENCH_PADDING_FACTOR][POLYBENCH_C99_SELECT(dim2,ddim2) + POLYBENCH_PADDING_FACTOR][POLYBENCH_C99_SELECT(dim3,ddim3) + POLYBENCH_PADDING_FACTOR]
# define POLYBENCH_4D(var, dim1, dim2, dim3, dim4, ddim1, ddim2, ddim3, ddim4) var[POLYBENCH_C99_SELECT(dim1,ddim1) + POLYBENCH_PADDING_FACTOR][POLYBENCH_C99_SELECT(dim2,ddim2) + POLYBENCH_PADDING_FACTOR][POLYBENCH_C99_SELECT(dim3,ddim3) + POLYBENCH_PADDING_FACTOR][POLYBENCH_C99_SELECT(dim4,ddim4) + POLYBENCH_PADDING_FACTOR]
# define POLYBENCH_5D(var, dim1, dim2, dim3, dim4, dim5, ddim1, ddim2, ddim3, ddim4, ddim5) var[POLYBENCH_C99_SELECT(dim1,ddim1) + POLYBENCH_PADDING_FACTOR][POLYBENCH_C99_SELECT(dim2,ddim2) + POLYBENCH_PADDING_FACTOR][POLYBENCH_C99_SELECT(dim3,ddim3) + POLYBENCH_PADDING_FACTOR][POLYBENCH_C99_SELECT(dim4,ddim4) + POLYBENCH_PADDING_FACTOR][POLYBENCH_C99_SELECT(dim5,ddim5) + POLYBENCH_PADDING_FACTOR]
/* Macros to allocate heap arrays.
Example:
polybench_alloc_2d_array(N, M, double) => allocates N x M x sizeof(double)
and returns a pointer to the 2d array
*/
# define POLYBENCH_ALLOC_1D_ARRAY(n1, type) \
(type(*)[n1 + POLYBENCH_PADDING_FACTOR])polybench_alloc_data (n1 + POLYBENCH_PADDING_FACTOR, sizeof(type))
# define POLYBENCH_ALLOC_2D_ARRAY(n1, n2, type) \
(type(*)[n1 + POLYBENCH_PADDING_FACTOR][n2 + POLYBENCH_PADDING_FACTOR])polybench_alloc_data ((n1 + POLYBENCH_PADDING_FACTOR) * (n2 + POLYBENCH_PADDING_FACTOR), sizeof(type))
# define POLYBENCH_ALLOC_3D_ARRAY(n1, n2, n3, type) \
(type(*)[n1 + POLYBENCH_PADDING_FACTOR][n2 + POLYBENCH_PADDING_FACTOR][n3 + POLYBENCH_PADDING_FACTOR])polybench_alloc_data ((n1 + POLYBENCH_PADDING_FACTOR) * (n2 + POLYBENCH_PADDING_FACTOR) * (n3 + POLYBENCH_PADDING_FACTOR), sizeof(type))
# define POLYBENCH_ALLOC_4D_ARRAY(n1, n2, n3, n4, type) \
(type(*)[n1 + POLYBENCH_PADDING_FACTOR][n2 + POLYBENCH_PADDING_FACTOR][n3 + POLYBENCH_PADDING_FACTOR][n4 + POLYBENCH_PADDING_FACTOR])polybench_alloc_data ((n1 + POLYBENCH_PADDING_FACTOR) * (n2 + POLYBENCH_PADDING_FACTOR) * (n3 + POLYBENCH_PADDING_FACTOR) * (n4 + POLYBENCH_PADDING_FACTOR), sizeof(type))
# define POLYBENCH_ALLOC_5D_ARRAY(n1, n2, n3, n4, n5, type) \
(type(*)[n1 + POLYBENCH_PADDING_FACTOR][n2 + POLYBENCH_PADDING_FACTOR][n3 + POLYBENCH_PADDING_FACTOR][n4 + POLYBENCH_PADDING_FACTOR][n5 + POLYBENCH_PADDING_FACTOR])polybench_alloc_data ((n1 + POLYBENCH_PADDING_FACTOR) * (n2 + POLYBENCH_PADDING_FACTOR) * (n3 + POLYBENCH_PADDING_FACTOR) * (n4 + POLYBENCH_PADDING_FACTOR) * (n5 + POLYBENCH_PADDING_FACTOR), sizeof(type))
/* Macros for array declaration. */
# ifndef POLYBENCH_STACK_ARRAYS
# define POLYBENCH_1D_ARRAY_DECL(var, type, dim1, ddim1) \
type POLYBENCH_1D(POLYBENCH_DECL_VAR(var), dim1, ddim1); \
var = POLYBENCH_ALLOC_1D_ARRAY(POLYBENCH_C99_SELECT(dim1, ddim1), type);
# define POLYBENCH_2D_ARRAY_DECL(var, type, dim1, dim2, ddim1, ddim2) \
type POLYBENCH_2D(POLYBENCH_DECL_VAR(var), dim1, dim2, ddim1, ddim2); \
var = POLYBENCH_ALLOC_2D_ARRAY(POLYBENCH_C99_SELECT(dim1, ddim1), POLYBENCH_C99_SELECT(dim2, ddim2), type);
# define POLYBENCH_3D_ARRAY_DECL(var, type, dim1, dim2, dim3, ddim1, ddim2, ddim3) \
type POLYBENCH_3D(POLYBENCH_DECL_VAR(var), dim1, dim2, dim3, ddim1, ddim2, ddim3); \
var = POLYBENCH_ALLOC_3D_ARRAY(POLYBENCH_C99_SELECT(dim1, ddim1), POLYBENCH_C99_SELECT(dim2, ddim2), POLYBENCH_C99_SELECT(dim3, ddim3), type);
/* Heap declaration of a 4D array: declares the pointer variable and
   allocates the padded storage.
   Bug fix: the original argument list read "dim1, dim2, ,dim3, dim4" --
   the stray comma passed an empty macro argument to POLYBENCH_4D and
   broke the expansion (wrong arity). */
# define POLYBENCH_4D_ARRAY_DECL(var, type, dim1, dim2, dim3, dim4, ddim1, ddim2, ddim3, ddim4) \
type POLYBENCH_4D(POLYBENCH_DECL_VAR(var), dim1, dim2, dim3, dim4, ddim1, ddim2, ddim3, ddim4); \
var = POLYBENCH_ALLOC_4D_ARRAY(POLYBENCH_C99_SELECT(dim1, ddim1), POLYBENCH_C99_SELECT(dim2, ddim2), POLYBENCH_C99_SELECT(dim3, ddim3), POLYBENCH_C99_SELECT(dim4, ddim4), type);
# define POLYBENCH_5D_ARRAY_DECL(var, type, dim1, dim2, dim3, dim4, dim5, ddim1, ddim2, ddim3, ddim4, ddim5) \
type POLYBENCH_5D(POLYBENCH_DECL_VAR(var), dim1, dim2, dim3, dim4, dim5, ddim1, ddim2, ddim3, ddim4, ddim5); \
var = POLYBENCH_ALLOC_5D_ARRAY(POLYBENCH_C99_SELECT(dim1, ddim1), POLYBENCH_C99_SELECT(dim2, ddim2), POLYBENCH_C99_SELECT(dim3, ddim3), POLYBENCH_C99_SELECT(dim4, ddim4), POLYBENCH_C99_SELECT(dim5, ddim5), type);
# else
# define POLYBENCH_1D_ARRAY_DECL(var, type, dim1, ddim1) \
type POLYBENCH_1D(POLYBENCH_DECL_VAR(var), dim1, ddim1);
# define POLYBENCH_2D_ARRAY_DECL(var, type, dim1, dim2, ddim1, ddim2) \
type POLYBENCH_2D(POLYBENCH_DECL_VAR(var), dim1, dim2, ddim1, ddim2);
# define POLYBENCH_3D_ARRAY_DECL(var, type, dim1, dim2, dim3, ddim1, ddim2, ddim3) \
type POLYBENCH_3D(POLYBENCH_DECL_VAR(var), dim1, dim2, dim3, ddim1, ddim2, ddim3);
# define POLYBENCH_4D_ARRAY_DECL(var, type, dim1, dim2, dim3, dim4, ddim1, ddim2, ddim3, ddim4) \
type POLYBENCH_4D(POLYBENCH_DECL_VAR(var), dim1, dim2, dim3, dim4, ddim1, ddim2, ddim3, ddim4);
# define POLYBENCH_5D_ARRAY_DECL(var, type, dim1, dim2, dim3, dim4, dim5, ddim1, ddim2, ddim3, ddim4, ddim5) \
type POLYBENCH_5D(POLYBENCH_DECL_VAR(var), dim1, dim2, dim3, dim4, dim5, ddim1, ddim2, ddim3, ddim4, ddim5);
# endif
/* Dead-code elimination macros. Use argc/argv for the run-time check. */
# ifndef POLYBENCH_DUMP_ARRAYS
# define POLYBENCH_DCE_ONLY_CODE if (argc > 42 && ! strcmp(argv[0], ""))
# else
# define POLYBENCH_DCE_ONLY_CODE
# endif
# define polybench_prevent_dce(func) \
POLYBENCH_DCE_ONLY_CODE \
func
/* Performance-related instrumentation. See polybench.c */
# define polybench_start_instruments
# define polybench_stop_instruments
# define polybench_print_instruments
/* PAPI support. */
# ifdef POLYBENCH_PAPI
/* Numeric PAPI event codes, filled in by polybench_papi_init below.
   Bug fix: the definition later in this file is a writable
   "int polybench_papi_eventlist[...]" (papi_init assigns into it), so a
   "const unsigned int" declaration conflicts with it -- the declared and
   defined types must match. */
extern int polybench_papi_eventlist[];
# undef polybench_start_instruments
# undef polybench_stop_instruments
# undef polybench_print_instruments
# define polybench_set_papi_thread_report(x) \
polybench_papi_counters_threadid = x;
# define polybench_start_instruments \
polybench_prepare_instruments(); \
polybench_papi_init(); \
int evid; \
for (evid = 0; polybench_papi_eventlist[evid] != 0; evid++) \
{ \
if (polybench_papi_start_counter(evid)) \
continue; \
# define polybench_stop_instruments \
polybench_papi_stop_counter(evid); \
} \
polybench_papi_close(); \
# define polybench_print_instruments polybench_papi_print();
# endif
/* Timing support. */
# if defined(POLYBENCH_TIME) || defined(POLYBENCH_GFLOPS)
# undef polybench_start_instruments
# undef polybench_stop_instruments
# undef polybench_print_instruments
# define polybench_start_instruments polybench_timer_start();
# define polybench_stop_instruments polybench_timer_stop();
# define polybench_print_instruments polybench_timer_print();
extern double polybench_program_total_flops;
extern void polybench_timer_start();
extern void polybench_timer_stop();
extern void polybench_timer_print();
# endif
/* Function declaration. */
# ifdef POLYBENCH_TIME
extern void polybench_timer_start();
extern void polybench_timer_stop();
extern void polybench_timer_print();
# endif
# ifdef POLYBENCH_PAPI
extern void polybench_prepare_instruments();
extern int polybench_papi_start_counter(int evid);
extern void polybench_papi_stop_counter(int evid);
extern void polybench_papi_init();
extern void polybench_papi_close();
extern void polybench_papi_print();
# endif
/* Function prototypes. */
extern void* polybench_alloc_data(unsigned long long int n, int elt_size);
/*
LLVM: I'm appending the content of the file polybench.c here. It'll avoid us
to have to copy it to the folder being compiled in the LLVM test suite.
*/
/**
* polybench.c: This file is part of the PolyBench/C 3.2 test suite.
*
*
* Contact: Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://polybench.sourceforge.net
*/
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <assert.h>
#include <time.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sched.h>
#include <math.h>
#ifdef _OPENMP
# include <omp.h>
#endif
/* By default, collect PAPI counters on thread 0. */
#ifndef POLYBENCH_THREAD_MONITOR
# define POLYBENCH_THREAD_MONITOR 0
#endif
/* Total LLC cache size. By default just over 32 MB (32770 KB). */
#ifndef POLYBENCH_CACHE_SIZE_KB
# define POLYBENCH_CACHE_SIZE_KB 32770
#endif
int polybench_papi_counters_threadid = POLYBENCH_THREAD_MONITOR;
double polybench_program_total_flops = 0;
#ifdef POLYBENCH_PAPI
# include <papi.h>
# define POLYBENCH_MAX_NB_PAPI_COUNTERS 96
char* _polybench_papi_eventlist[] = {
#include "papi_counters.list"
NULL
};
int polybench_papi_eventset;
int polybench_papi_eventlist[POLYBENCH_MAX_NB_PAPI_COUNTERS];
long_long polybench_papi_values[POLYBENCH_MAX_NB_PAPI_COUNTERS];
#endif
/* Timer code (gettimeofday). */
double polybench_t_start, polybench_t_end;
/* Timer code (RDTSC). */
unsigned long long int polybench_c_start, polybench_c_end;
/* Wall-clock time in seconds, read with gettimeofday.  When the build is
   not instrumented for timing (POLYBENCH_TIME undefined) it simply
   returns 0.  File-local helper used by the polybench timer functions. */
static
double rtclock()
{
#ifdef POLYBENCH_TIME
  struct timeval now;
  const int rc = gettimeofday (&now, NULL);
  if (rc != 0)
    printf ("Error return from gettimeofday: %d", rc);
  return ((double) now.tv_sec + (double) now.tv_usec * 1.0e-6);
#else
  return 0;
#endif
}
#ifdef POLYBENCH_CYCLE_ACCURATE_TIMER
/* Read the x86 time-stamp counter and return the full 64-bit cycle
   count.  Used in place of gettimeofday when the cycle-accurate timer is
   requested; x86-specific (inline RDTSC). */
static
unsigned long long int rdtsc()
{
unsigned long long int ret = 0;
unsigned int cycles_lo;
unsigned int cycles_hi;
/* RDTSC returns the low 32 bits in EAX ("=a") and the high 32 bits in
   EDX ("=d"). */
__asm__ volatile ("RDTSC" : "=a" (cycles_lo), "=d" (cycles_hi));
ret = (unsigned long long int)cycles_hi << 32 | cycles_lo;
return ret;
}
#endif
/* Read through a zero-filled buffer larger than the LLC
   (POLYBENCH_CACHE_SIZE_KB) so that measurements start from a cold
   cache.  The final assert both sanity-checks the sum and keeps the
   compiler from dead-code-eliminating the read loop. */
void polybench_flush_cache()
{
  /* Number of doubles needed to cover the configured cache size. */
  int cs = POLYBENCH_CACHE_SIZE_KB * 1024 / sizeof(double);
  double* flush = (double*) calloc (cs, sizeof(double));
  int i;
  double tmp = 0.0;
  /* Bug fix: calloc of a ~32 MB buffer can fail; dereferencing NULL
     below would be undefined behavior. */
  assert (flush != NULL);
#ifdef _OPENMP
  /* Bug fix: the original pragma had no reduction clause, so every
     thread updated the shared "tmp" concurrently -- a data race. */
#pragma omp parallel for reduction(+:tmp)
#endif
  for (i = 0; i < cs; i++)
    tmp += flush[i];
  assert (tmp <= 10.0);
  free (flush);
}
#ifdef POLYBENCH_LINUX_FIFO_SCHEDULER
/* Switch the calling process to the SCHED_FIFO real-time policy at
   maximum priority to reduce OS scheduling noise during measurement.
   Linux-only; the process must have the privilege to do so (root). */
void polybench_linux_fifo_scheduler()
{
/* Use FIFO scheduler to limit OS interference. Program must be run
as root, and this works only for Linux kernels. */
struct sched_param schedParam;
schedParam.sched_priority = sched_get_priority_max (SCHED_FIFO);
sched_setscheduler (0, SCHED_FIFO, &schedParam);
}
/* Counterpart of polybench_linux_fifo_scheduler: put the calling process
   back on the default SCHED_OTHER policy after timing is done. */
void polybench_linux_standard_scheduler()
{
/* Restore to standard scheduler policy. */
struct sched_param schedParam;
schedParam.sched_priority = sched_get_priority_max (SCHED_OTHER);
sched_setscheduler (0, SCHED_OTHER, &schedParam);
}
#endif
#ifdef POLYBENCH_PAPI
/* Report a failed PAPI call (source file/line, call name, PAPI return
   code) on stdout, shut PAPI down if it was initialized, and terminate
   the program with exit(1).  Never returns.
   NOTE(review): the 3-argument PAPI_perror used below is the pre-PAPI-5
   signature -- confirm against the PAPI version actually linked. */
static
void test_fail(char *file, int line, char *call, int retval)
{
char buf[128];
memset(buf, '\0', sizeof(buf));
/* Any nonzero return code is treated as FAILED; zero as SKIPPED. */
if (retval != 0)
fprintf (stdout,"%-40s FAILED\nLine # %d\n", file, line);
else
{
fprintf (stdout,"%-40s SKIPPED\n", file);
fprintf (stdout,"Line # %d\n", line);
}
if (retval == PAPI_ESYS)
{
sprintf (buf, "System error in %s", call);
perror (buf);
}
else if (retval > 0)
fprintf (stdout,"Error: %s\n", call);
else if (retval == 0)
fprintf (stdout,"Error: %s\n", call);
else
{
/* Negative non-ESYS codes: ask PAPI for a textual description. */
char errstring[PAPI_MAX_STR_LEN];
PAPI_perror (retval, errstring, PAPI_MAX_STR_LEN);
fprintf (stdout,"Error in %s: %s\n", call, errstring);
}
fprintf (stdout,"\n");
if (PAPI_is_initialized ())
PAPI_shutdown ();
exit (1);
}
/* Initialize the PAPI library and translate the textual event names in
   _polybench_papi_eventlist into numeric codes (polybench_papi_eventlist,
   zero-terminated).  Under OpenMP only the monitored thread
   (polybench_papi_counters_threadid) performs the initialization. */
void polybench_papi_init()
{
# ifdef _OPENMP
#pragma omp parallel
{
#pragma omp master
{
/* Clamp the monitored thread id to the available thread count. */
if (omp_get_max_threads () < polybench_papi_counters_threadid)
polybench_papi_counters_threadid = omp_get_max_threads () - 1;
}
#pragma omp barrier
if (omp_get_thread_num () == polybench_papi_counters_threadid)
{
# endif
int retval;
polybench_papi_eventset = PAPI_NULL;
if ((retval = PAPI_library_init (PAPI_VER_CURRENT)) != PAPI_VER_CURRENT)
test_fail (__FILE__, __LINE__, "PAPI_library_init", retval);
if ((retval = PAPI_create_eventset (&polybench_papi_eventset))
!= PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_create_eventset", retval);
int k;
/* Convert each event name (from papi_counters.list) to its code. */
for (k = 0; _polybench_papi_eventlist[k]; ++k)
{
if ((retval =
PAPI_event_name_to_code (_polybench_papi_eventlist[k],
&(polybench_papi_eventlist[k])))
!= PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_event_name_to_code", retval);
}
/* Zero-terminate the numeric list; iteration elsewhere stops on 0. */
polybench_papi_eventlist[k] = 0;
# ifdef _OPENMP
}
}
#pragma omp barrier
# endif
}
/* Destroy the PAPI event set and shut the library down.  Under OpenMP
   only the monitored thread executes the teardown. */
void polybench_papi_close()
{
# ifdef _OPENMP
#pragma omp parallel
{
if (omp_get_thread_num () == polybench_papi_counters_threadid)
{
# endif
int retval;
if ((retval = PAPI_destroy_eventset (&polybench_papi_eventset))
!= PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_destroy_eventset", retval);
if (PAPI_is_initialized ())
PAPI_shutdown ();
# ifdef _OPENMP
}
}
#pragma omp barrier
# endif
}
/* Flush the cache (unless POLYBENCH_NO_FLUSH_CACHE), add event number
   evid to the event set and start counting on the monitored thread.
   Always returns 0; any PAPI failure aborts via test_fail. */
int polybench_papi_start_counter(int evid)
{
# ifndef POLYBENCH_NO_FLUSH_CACHE
polybench_flush_cache();
# endif
# ifdef _OPENMP
# pragma omp parallel
{
if (omp_get_thread_num () == polybench_papi_counters_threadid)
{
# endif
int retval = 1;
char descr[PAPI_MAX_STR_LEN];
PAPI_event_info_t evinfo;
/* descr/evinfo are fetched for their side effect of validating the
   event code; descr itself is not printed here. */
PAPI_event_code_to_name (polybench_papi_eventlist[evid], descr);
if (PAPI_add_event (polybench_papi_eventset,
polybench_papi_eventlist[evid]) != PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_add_event", 1);
if (PAPI_get_event_info (polybench_papi_eventlist[evid], &evinfo)
!= PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_get_event_info", retval);
if ((retval = PAPI_start (polybench_papi_eventset)) != PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_start", retval);
# ifdef _OPENMP
}
}
#pragma omp barrier
# endif
return 0;
}
/* Read and stop counter evid, record its value in polybench_papi_values,
   and remove the event from the event set so the next counter can be
   measured in isolation. */
void polybench_papi_stop_counter(int evid)
{
# ifdef _OPENMP
# pragma omp parallel
{
if (omp_get_thread_num () == polybench_papi_counters_threadid)
{
# endif
int retval;
long_long values[1];
values[0] = 0;
if ((retval = PAPI_read (polybench_papi_eventset, &values[0]))
!= PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_read", retval);
if ((retval = PAPI_stop (polybench_papi_eventset, NULL)) != PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_stop", retval);
polybench_papi_values[evid] = values[0];
if ((retval = PAPI_remove_event
(polybench_papi_eventset,
polybench_papi_eventlist[evid])) != PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_remove_event", retval);
# ifdef _OPENMP
}
}
#pragma omp barrier
# endif
}
/* Print all collected counter values on one space-separated line; with
   POLYBENCH_PAPI_VERBOSE each value is prefixed by its event name.
   NOTE(review): the verbose flag is only set inside the _OPENMP region,
   so serial builds never print verbosely -- confirm this is intended.
   NOTE(review): values are PAPI long_long (signed) but printed with
   "%llu" -- confirm the unsigned formatting is intended. */
void polybench_papi_print()
{
int verbose = 0;
# ifdef _OPENMP
# pragma omp parallel
{
if (omp_get_thread_num() == polybench_papi_counters_threadid)
{
#ifdef POLYBENCH_PAPI_VERBOSE
verbose = 1;
#endif
if (verbose)
printf ("On thread %d:\n", polybench_papi_counters_threadid);
#endif
int evid;
for (evid = 0; polybench_papi_eventlist[evid] != 0; ++evid)
{
if (verbose)
printf ("%s=", _polybench_papi_eventlist[evid]);
printf ("%llu ", polybench_papi_values[evid]);
if (verbose)
printf ("\n");
}
printf ("\n");
# ifdef _OPENMP
}
}
#pragma omp barrier
# endif
}
#endif
/* ! POLYBENCH_PAPI */
/* Pre-measurement setup: flush the cache (unless disabled at compile
   time) and optionally switch to the Linux FIFO real-time scheduler. */
void polybench_prepare_instruments()
{
#ifndef POLYBENCH_NO_FLUSH_CACHE
polybench_flush_cache ();
#endif
#ifdef POLYBENCH_LINUX_FIFO_SCHEDULER
polybench_linux_fifo_scheduler ();
#endif
}
/* Start the benchmark timer: prepare the measurement environment, then
   record either wall-clock seconds (default) or the TSC cycle count
   (POLYBENCH_CYCLE_ACCURATE_TIMER) in the matching global. */
void polybench_timer_start()
{
polybench_prepare_instruments ();
#ifndef POLYBENCH_CYCLE_ACCURATE_TIMER
polybench_t_start = rtclock ();
#else
polybench_c_start = rdtsc ();
#endif
}
/* Stop the benchmark timer (record the end timestamp or cycle count)
   and, if the FIFO scheduler was selected, restore the default policy. */
void polybench_timer_stop()
{
#ifndef POLYBENCH_CYCLE_ACCURATE_TIMER
polybench_t_end = rtclock ();
#else
polybench_c_end = rdtsc ();
#endif
#ifdef POLYBENCH_LINUX_FIFO_SCHEDULER
polybench_linux_standard_scheduler ();
#endif
}
/* Print the measurement: elapsed seconds (default), raw cycle count
   (POLYBENCH_CYCLE_ACCURATE_TIMER), or GFLOP/s (POLYBENCH_GFLOPS, using
   polybench_program_total_flops set by the benchmark). */
void polybench_timer_print()
{
#ifdef POLYBENCH_GFLOPS
  /* Bug fix: the flops counter is declared as
     polybench_program_total_flops; the "__"-prefixed name used before
     does not exist anywhere in this file and failed to compile whenever
     POLYBENCH_GFLOPS was defined. */
  if (polybench_program_total_flops == 0)
    {
      printf ("[PolyBench][WARNING] Program flops not defined, use polybench_set_program_flops(value)\n");
      printf ("%0.6lf\n", polybench_t_end - polybench_t_start);
    }
  else
    printf ("%0.2lf\n",
            (polybench_program_total_flops /
             (double)(polybench_t_end - polybench_t_start)) / 1000000000);
#else
# ifndef POLYBENCH_CYCLE_ACCURATE_TIMER
  printf ("%0.6f\n", polybench_t_end - polybench_t_start);
# else
  /* Bug fix: "%Ld" is not a standard printf conversion; the cycle
     counters are unsigned long long, which requires "%llu". */
  printf ("%llu\n", polybench_c_end - polybench_c_start);
# endif
#endif
}
/* Allocate num bytes aligned to a 32-byte boundary via posix_memalign;
   prints an error and exits on failure, so callers never see NULL. */
static
void *
xmalloc (size_t num)
{
  void *ptr = NULL;
  const int rc = posix_memalign (&ptr, 32, num);
  if (! ptr || rc)
    {
      fprintf (stderr, "[PolyBench] posix_memalign: cannot allocate memory");
      exit (1);
    }
  return ptr;
}
/* Allocate an n-element array of elt_size-byte elements, 32-byte aligned
   (via xmalloc, which exits on failure).
   Resolves the old "FIXME: detect overflow!": requests whose byte count
   cannot be represented in size_t (or a negative element size) now fail
   loudly instead of silently wrapping and under-allocating. */
void* polybench_alloc_data(unsigned long long int n, int elt_size)
{
  if (elt_size < 0 || (elt_size > 0 && n > ((size_t) -1) / (size_t) elt_size))
    {
      fprintf (stderr, "[PolyBench] polybench_alloc_data: allocation size overflow");
      exit (1);
    }
  size_t val = (size_t) n * (size_t) elt_size;
  void* ret = xmalloc (val);
  return ret;
}
#endif /* !POLYBENCH_H */
|
GB_binop__le_fp32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__le_fp32)
// A.*B function (eWiseMult): GB (_AemultB_01__le_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__le_fp32)
// A.*B function (eWiseMult): GB (_AemultB_03__le_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__le_fp32)
// A*D function (colscale): GB (_AxD__le_fp32)
// D*A function (rowscale): GB (_DxB__le_fp32)
// C+=B function (dense accum): GB (_Cdense_accumB__le_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__le_fp32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__le_fp32)
// C=scalar+B GB (_bind1st__le_fp32)
// C=scalar+B' GB (_bind1st_tran__le_fp32)
// C=A+scalar GB (_bind2nd__le_fp32)
// C=A'+scalar GB (_bind2nd_tran__le_fp32)
// C type: bool
// A type: float
// B,b type: float
// BinaryOp: cij = (aij <= bij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
float bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x <= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LE || GxB_NO_FP32 || GxB_NO_LE_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
/* Disabled for this operator: the dense C += A+B kernel is only
   generated for accumulator-capable ops (listed below); LE does not
   qualify, so this stub is compiled out and named "(none)". */
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
/* C = A+B with C, A, B all dense, no accumulator; "+" here is the LE
   operator (cij = aij <= bij, float inputs, bool output).  Returns
   GrB_NO_VALUE when the operator is disabled via the GxB_NO_* flags. */
GrB_Info GB (_Cdense_ewise3_noaccum__le_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
/* The numeric loop lives in the shared template; the GB_* macros
   defined above select the types and the <= operator. */
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
/* C += B, accumulating a sparse matrix into a dense matrix.  For the LE
   operator the template inclusion is compiled out (#if 0), so this
   function is effectively a no-op that reports success. */
GrB_Info GB (_Cdense_accumB__le_fp32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
/* C += b, accumulating a scalar into a dense matrix.  As with accumB,
   the kernel is compiled out (#if 0) for the LE operator, so this is a
   no-op returning success. */
GrB_Info GB (_Cdense_accumb__le_fp32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
/* C = A*D: scale the columns of A by the diagonal matrix D using the LE
   operator; C's values are bool.  The *_is_pattern flags and ek-slicing
   arrays parameterize the shared colscale template. */
GrB_Info GB (_AxD__le_fp32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
// Cx is typed bool because LE produces a boolean result.
GrB_Info GB (_DxB__le_fp32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B, using LE as the "add" operator.
// Workspaces for slicing M, A and B are declared here and released by
// GB_FREE_WORK after the template runs.
GrB_Info GB (_AaddB__le_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace used by the template below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult (method 01): C = A.*B or C<M> = A.*B with LE as the multiply
// operator.  All work is done by the included meta-template.
GrB_Info GB (_AemultB_01__le_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  GB_BINOP_FLIP selects whether an explicit flipped expansion
// of the template is needed for non-commutative operators.
GrB_Info GB (_AemultB_02__le_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 03): C<M> = A.*B where M is sparse/hyper and both A and
// B are bitmap/full.  Work is sliced over M (M_ek_slicing).
GrB_Info GB (_AemultB_03__le_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult, bitmap case: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is
// stored as a bitmap.
GrB_Info GB (_AemultB_bitmap__le_fp32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [k] = (x <= Bx [k]) for all k: apply LE with the scalar bound to the
// first argument.  Entries absent from the bitmap Bb are skipped.
GrB_Info GB (_bind1st__le_fp32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
  // typed views of the untyped inputs
  float x = (*((float *) x_input)) ;
  float *Bx = (float *) Bx_input ;
  bool *Cx = (bool *) Cx_output ;
  int64_t k ;
  #pragma omp parallel for num_threads(nthreads) schedule(static)
  for (k = 0 ; k < anz ; k++)
  {
    if (GBB (Bb, k))
    {
      float b_k = Bx [k] ;
      Cx [k] = (x <= b_k) ;
    }
  }
  return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [k] = (Ax [k] <= y) for all k: apply LE with the scalar bound to the
// second argument.  Entries absent from the bitmap Ab are skipped.
GrB_Info GB (_bind2nd__le_fp32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
  // typed views of the untyped inputs
  float *Ax = (float *) Ax_input ;
  float y = (*((float *) y_input)) ;
  bool *Cx = (bool *) Cx_output ;
  int64_t k ;
  #pragma omp parallel for num_threads(nthreads) schedule(static)
  for (k = 0 ; k < anz ; k++)
  {
    if (GBB (Ab, k))
    {
      float a_k = Ax [k] ;
      Cx [k] = (a_k <= y) ;
    }
  }
  return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is the per-entry kernel used by GB_unop_transpose.c:
// cij = (x <= aij), no typecasting (in spite of the macro name).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = (x <= aij) ; \
}
// C = op (x, A'): transpose A and apply LE with the scalar as first argument.
GrB_Info GB (_bind1st_tran__le_fp32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for subsequent kernels (generator emits this
// unconditionally, even after the return)
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is the per-entry kernel used by GB_unop_transpose.c:
// cij = (aij <= y), no typecasting (in spite of the macro name).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = (aij <= y) ; \
}
// C = op (A', y): transpose A and apply LE with the scalar as second argument.
GrB_Info GB (_bind2nd_tran__le_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
blake2bp-ref.c | /*
BLAKE2 reference source code package - reference C implementations
Written in 2012 by Samuel Neves <sneves@dei.uc.pt>
To the extent possible under law, the author(s) have dedicated all copyright
and related and neighboring rights to this software to the public domain
worldwide. This software is distributed without any warranty.
You should have received a copy of the CC0 Public Domain Dedication along with
this software. If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
#include "blake2.h"
#include "blake2-impl.h"
#define PARALLELISM_DEGREE 4
/* Initialize one leaf state of the BLAKE2bp tree.
   The parameter block bytes feed directly into the BLAKE2b IV, so every
   field (including salt/personal/reserved padding) must be set explicitly.
   offset identifies which of the PARALLELISM_DEGREE leaves this is. */
static inline int blake2bp_init_leaf( blake2b_state *S, uint8_t outlen, uint8_t keylen, uint64_t offset )
{
blake2b_param P[1];
P->digest_length = outlen;
P->key_length = keylen;
P->fanout = PARALLELISM_DEGREE;
P->depth = 2;  /* fixed 2-level tree: leaves + root */
store32( &P->leaf_length, 0 );
store64( &P->node_offset, offset );
P->node_depth = 0;  /* leaves sit at depth 0 */
P->inner_length = BLAKE2B_OUTBYTES;
memset( P->reserved, 0, sizeof( P->reserved ) );
memset( P->salt, 0, sizeof( P->salt ) );
memset( P->personal, 0, sizeof( P->personal ) );
return blake2b_init_param( S, P );
}
/* Initialize the root state of the BLAKE2bp tree.  Differs from a leaf only
   in node_offset (always 0) and node_depth (1 = root level). */
static inline int blake2bp_init_root( blake2b_state *S, uint8_t outlen, uint8_t keylen )
{
blake2b_param P[1];
P->digest_length = outlen;
P->key_length = keylen;
P->fanout = PARALLELISM_DEGREE;
P->depth = 2;  /* fixed 2-level tree: leaves + root */
store32( &P->leaf_length, 0 );
store64( &P->node_offset, 0 );
P->node_depth = 1;  /* root sits above the leaves */
P->inner_length = BLAKE2B_OUTBYTES;
memset( P->reserved, 0, sizeof( P->reserved ) );
memset( P->salt, 0, sizeof( P->salt ) );
memset( P->personal, 0, sizeof( P->personal ) );
return blake2b_init_param( S, P );
}
/* Initialize an unkeyed blake2bp state: root plus PARALLELISM_DEGREE leaves.
   Returns 0 on success, -1 on invalid outlen or init failure. */
int blake2bp_init( blake2bp_state *S, const uint8_t outlen )
{
if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1;
memset( S->buf, 0, sizeof( S->buf ) );
S->buflen = 0;
if( blake2bp_init_root( S->R, outlen, 0 ) < 0 )
return -1;
for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
if( blake2bp_init_leaf( S->S[i], outlen, 0, i ) < 0 ) return -1;
/* the root and the last leaf are flagged as "last node" of their level */
S->R->last_node = 1;
S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
return 0;
}
/* Initialize a keyed blake2bp state.  The key is padded to one full block
   and absorbed into every leaf, then wiped from the stack.
   Returns 0 on success, -1 on invalid outlen/key or init failure. */
int blake2bp_init_key( blake2bp_state *S, const uint8_t outlen, const void *key, const uint8_t keylen )
{
if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1;
if( !key || !keylen || keylen > BLAKE2B_KEYBYTES ) return -1;
memset( S->buf, 0, sizeof( S->buf ) );
S->buflen = 0;
if( blake2bp_init_root( S->R, outlen, keylen ) < 0 )
return -1;
for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
if( blake2bp_init_leaf( S->S[i], outlen, keylen, i ) < 0 ) return -1;
/* the root and the last leaf are flagged as "last node" of their level */
S->R->last_node = 1;
S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
{
uint8_t block[BLAKE2B_BLOCKBYTES];
memset( block, 0, BLAKE2B_BLOCKBYTES );
memcpy( block, key, keylen );
for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
blake2b_update( S->S[i], block, BLAKE2B_BLOCKBYTES );
secure_zero_memory( block, BLAKE2B_BLOCKBYTES ); /* Burn the key from stack */
}
return 0;
}
/* Absorb inlen bytes of input.  Data is distributed round-robin across the
   leaves: leaf i consumes block i of every group of PARALLELISM_DEGREE
   blocks.  Partial groups are carried in S->buf until a full group (or
   final()) arrives.  Always returns 0. */
int blake2bp_update( blake2bp_state *S, const uint8_t *in, uint64_t inlen )
{
size_t left = S->buflen;
size_t fill = sizeof( S->buf ) - left;
/* first, complete and flush any previously buffered partial group */
if( left && inlen >= fill )
{
memcpy( S->buf + left, in, fill );
for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
blake2b_update( S->S[i], S->buf + i * BLAKE2B_BLOCKBYTES, BLAKE2B_BLOCKBYTES );
in += fill;
inlen -= fill;
left = 0;
}
/* each lane (OpenMP thread, or loop iteration when serial) processes its
   own stripe of blocks: block id__, id__+P, id__+2P, ... */
#if defined(_OPENMP)
#pragma omp parallel shared(S), num_threads(PARALLELISM_DEGREE)
#else
for( size_t id__ = 0; id__ < PARALLELISM_DEGREE; ++id__ )
#endif
{
#if defined(_OPENMP)
size_t id__ = omp_get_thread_num();
#endif
uint64_t inlen__ = inlen;
const uint8_t *in__ = ( const uint8_t * )in;
in__ += id__ * BLAKE2B_BLOCKBYTES;
while( inlen__ >= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES )
{
blake2b_update( S->S[id__], in__, BLAKE2B_BLOCKBYTES );
in__ += PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
inlen__ -= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
}
}
/* buffer whatever is left of the final, incomplete group */
in += inlen - inlen % ( PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES );
inlen %= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
if( inlen > 0 )
memcpy( S->buf + left, in, inlen );
S->buflen = left + inlen;
return 0;
}
/* Finalize: drain each leaf's remaining buffered block (if any), collect the
   leaf digests, hash them through the root state, and write outlen bytes of
   the final digest to out.  Returns blake2b_final's status. */
int blake2bp_final( blake2bp_state *S, uint8_t *out, const uint8_t outlen )
{
uint8_t hash[PARALLELISM_DEGREE][BLAKE2B_OUTBYTES];
for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
{
/* leaf i's share of the buffered tail starts at block i */
if( S->buflen > i * BLAKE2B_BLOCKBYTES )
{
size_t left = S->buflen - i * BLAKE2B_BLOCKBYTES;
if( left > BLAKE2B_BLOCKBYTES ) left = BLAKE2B_BLOCKBYTES;
blake2b_update( S->S[i], S->buf + i * BLAKE2B_BLOCKBYTES, left );
}
blake2b_final( S->S[i], hash[i], BLAKE2B_OUTBYTES );
}
/* the root hashes the concatenated leaf digests */
for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
blake2b_update( S->R, hash[i], BLAKE2B_OUTBYTES );
return blake2b_final( S->R, out, outlen );
}
/* One-shot BLAKE2bp: hash inlen bytes of in (optionally keyed) into out.
 *
 * The input is split round-robin into PARALLELISM_DEGREE leaf states (leaf i
 * takes block i of every group of PARALLELISM_DEGREE blocks), then the leaf
 * digests are hashed by a root state to form the final digest.
 *
 * out    - receives outlen bytes of digest (must be non-NULL)
 * in     - message bytes (may be NULL only when inlen == 0)
 * key    - optional key (may be NULL only when keylen == 0)
 * outlen - 1..BLAKE2B_OUTBYTES
 * keylen - 0..BLAKE2B_KEYBYTES
 *
 * Returns 0 on success, -1 on invalid parameters or init failure.
 */
int blake2bp( uint8_t *out, const void *in, const void *key, uint8_t outlen, uint64_t inlen, uint8_t keylen )
{
  uint8_t hash[PARALLELISM_DEGREE][BLAKE2B_OUTBYTES];
  blake2b_state S[PARALLELISM_DEGREE][1];
  blake2b_state FS[1];

  /* Verify parameters */
  if ( NULL == in && inlen > 0 ) return -1;
  if ( NULL == out ) return -1;
  if( NULL == key && keylen > 0 ) return -1;
  if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1;
  if( keylen > BLAKE2B_KEYBYTES ) return -1;

  for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
    if( blake2bp_init_leaf( S[i], outlen, keylen, i ) < 0 ) return -1;

  S[PARALLELISM_DEGREE - 1]->last_node = 1; /* mark last node of the leaf level */

  if( keylen > 0 )
  {
    /* absorb the zero-padded key block into every leaf, then wipe it */
    uint8_t block[BLAKE2B_BLOCKBYTES];
    memset( block, 0, BLAKE2B_BLOCKBYTES );
    memcpy( block, key, keylen );
    for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
      blake2b_update( S[i], block, BLAKE2B_BLOCKBYTES );
    secure_zero_memory( block, BLAKE2B_BLOCKBYTES ); /* Burn the key from stack */
  }

  /* each lane hashes its own stripe: blocks id__, id__+P, id__+2P, ... */
#if defined(_OPENMP)
  #pragma omp parallel shared(S,hash), num_threads(PARALLELISM_DEGREE)
#else
  for( size_t id__ = 0; id__ < PARALLELISM_DEGREE; ++id__ )
#endif
  {
#if defined(_OPENMP)
    size_t id__ = omp_get_thread_num();
#endif
    uint64_t inlen__ = inlen;
    const uint8_t *in__ = ( const uint8_t * )in;
    in__ += id__ * BLAKE2B_BLOCKBYTES;

    while( inlen__ >= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES )
    {
      blake2b_update( S[id__], in__, BLAKE2B_BLOCKBYTES );
      in__ += PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
      inlen__ -= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
    }

    /* this lane's (possibly short) block of the final partial group */
    if( inlen__ > id__ * BLAKE2B_BLOCKBYTES )
    {
      const size_t left = inlen__ - id__ * BLAKE2B_BLOCKBYTES;
      const size_t len = left <= BLAKE2B_BLOCKBYTES ? left : BLAKE2B_BLOCKBYTES;
      blake2b_update( S[id__], in__, len );
    }

    blake2b_final( S[id__], hash[id__], BLAKE2B_OUTBYTES );
  }

  if( blake2bp_init_root( FS, outlen, keylen ) < 0 )
    return -1;

  FS->last_node = 1; /* mark as last node */

  /* the root hashes the concatenated leaf digests */
  for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
    blake2b_update( FS, hash[i], BLAKE2B_OUTBYTES );

  return blake2b_final( FS, out, outlen ); /* (stray ";;" removed) */
}
#if defined(BLAKE2BP_SELFTEST)
#include <string.h>
#include "blake2-kat.h"
/* Known-answer self-test: hash prefixes of a deterministic buffer with a
   deterministic key and compare against the keyed KAT vectors.  Prints "ok"
   and returns 0 on success; prints "error" and returns -1 on any mismatch. */
int main( int argc, char **argv )
{
  uint8_t key[BLAKE2B_KEYBYTES];
  uint8_t buf[KAT_LENGTH];
  size_t step;

  /* deterministic fixtures: key[i] = i, buf[i] = i (mod 256) */
  for( step = 0; step < BLAKE2B_KEYBYTES; ++step )
    key[step] = ( uint8_t )step;

  for( step = 0; step < KAT_LENGTH; ++step )
    buf[step] = ( uint8_t )step;

  /* check every message length from 0 to KAT_LENGTH-1 */
  for( step = 0; step < KAT_LENGTH; ++step )
  {
    uint8_t digest[BLAKE2B_OUTBYTES];
    blake2bp( digest, buf, key, BLAKE2B_OUTBYTES, step, BLAKE2B_KEYBYTES );

    if( memcmp( digest, blake2bp_keyed_kat[step], BLAKE2B_OUTBYTES ) != 0 )
    {
      puts( "error" );
      return -1;
    }
  }

  puts( "ok" );
  return 0;
}
#endif
|
omp.c | #define _XOPEN_SOURCE 700
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <omp.h>
#include "../include/fs.h"
#include "../include/keygen.h"
#include "../include/encryptor.h"
#include "../include/commons.h"
#define NUM_THREADS 2
/* Brute-force key search: try every key index in [0, cant_keys) against the
 * encrypted block, first with BLOWFISH and then with CAST5, using OpenMP to
 * spread the key space over threads.  A decryption is considered successful
 * when the plaintext starts with "Frase".
 *
 * Fixes vs. the original:
 *  - success_key/encryption_method are now written inside an OpenMP critical
 *    section (they were updated from multiple threads with no
 *    synchronization, a data race);
 *  - thread_id is a loop-local variable instead of a shared-then-privatized
 *    function-scope one;
 *  - all heap allocations are released before exit.
 */
int main(int argc, char** argv)
{
    check_parameters( argc, argv );

    unsigned char encrypted_text[ BLOCK_SIZE ];
    unsigned char iv[ IV_LENGTH ] = {1,2,3,4,5,6,7,8};
    unsigned char **key = NULL;
    long cant_keys;
    long success_key = -1;
    int encryption_method = -1;
    int num_threads = NUM_THREADS;
    char * omp_num_threads = getenv("OMP_NUM_THREADS");
    time_t start_time, end_time;

    /* get the start time stamp */
    start_time = time( NULL );

    read_parameters( argv, encrypted_text, &cant_keys );

    /* get the number of threads from the environment (fall back to the
       compile-time default when unset or invalid) */
    if( omp_num_threads != NULL ){
        num_threads = strtol( omp_num_threads, NULL, 0 );
        if( num_threads <= 0 )
            num_threads = NUM_THREADS;
    }
    omp_set_num_threads( num_threads );

    /* one blowfish decryptor per thread */
    Encryptor *bf = (Encryptor *) malloc( num_threads * sizeof( Encryptor ) );
    if( bf == NULL ){
        printf("\nerror on encryptor memory allocation\n");
        exit(-1);
    }

    /* one cast5 decryptor per thread */
    Encryptor *cast5 = (Encryptor *) malloc( num_threads * sizeof( Encryptor ) );
    if( cast5 == NULL ){
        printf("error on encryptor memory allocation\n");
        exit(-1);
    }

    /* one key buffer per thread */
    key = ( unsigned char ** ) malloc( num_threads * sizeof( unsigned char * ) );
    if( key == NULL ) {
        printf("\nerror on key memory allocation\n");
        exit(-1);
    }
    for( int i = 0; i < num_threads; i++ ) {
        key[i] = ( unsigned char * ) malloc( KEY_LENGTH );
        if( key[i] == NULL ) {
            printf("\nerror on key memory allocation\n");
            exit(-1);
        }
    }

    /* BLOWFISH pass over the key space */
    #pragma omp parallel for shared( success_key, encryption_method )
    for( long i = 0; i < cant_keys ; i++ ) {
        /* racy read, but only used as an early-exit hint: worst case a
           thread does one redundant decryption */
        if( success_key == -1 ) {
            int thread_id = omp_get_thread_num();
            keygen_itokey( key[ thread_id ], i );
            init_decryptor( &bf[thread_id], DECRYPT, BLOWFISH, iv, encrypted_text );
            encryptor_set_key( &bf[ thread_id ], key[ thread_id ] );
            encryptor_init( &bf[ thread_id ] );
            encryptor_update( &bf[ thread_id ] );
            encryptor_final( &bf[ thread_id ] );
            if( memcmp( (char *)bf[ thread_id ].output, "Frase", 5 ) == 0 ) {
                /* serialize the (rare) result publication */
                #pragma omp critical
                {
                    if( success_key == -1 ) {
                        success_key = i;
                        encryption_method = BLOWFISH;
                    }
                }
            }
        }
    }

    /* CAST5 pass over the key space (skipped quickly if already found) */
    #pragma omp parallel for shared( success_key, encryption_method )
    for( long i = 0; i < cant_keys ; i++ ) {
        if( success_key == -1 ) {
            int thread_id = omp_get_thread_num();
            keygen_itokey( key[ thread_id ], i );
            init_decryptor( &cast5[thread_id], DECRYPT, CAST5, iv, encrypted_text );
            encryptor_set_key( &cast5[ thread_id ], key[ thread_id ] );
            encryptor_init( &cast5[ thread_id ] );
            encryptor_update( &cast5[ thread_id ] );
            encryptor_final( &cast5[ thread_id ] );
            if( memcmp( (char *)cast5[ thread_id ].output, "Frase", 5 ) == 0 ) {
                #pragma omp critical
                {
                    if( success_key == -1 ) {
                        success_key = i;
                        encryption_method = CAST5;
                    }
                }
            }
        }
    }

    /* take the end time stamp and report */
    end_time = time(NULL);
    print_result( success_key, encryption_method, difftime( end_time, start_time ) );

    /* release all per-thread resources */
    for( int i = 0; i < num_threads; i++ )
        free( key[i] );
    free( key );
    free( cast5 );
    free( bf );

    exit(0);
}
|
resample.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% RRRR EEEEE SSSSS AAA M M PPPP L EEEEE %
% R R E SS A A MM MM P P L E %
% RRRR EEE SSS AAAAA M M M PPPP L EEE %
% R R E SS A A M M P L E %
% R R EEEEE SSSSS A A M M P LLLLL EEEEE %
% %
% %
% MagickCore Pixel Resampling Methods %
% %
% Software Design %
% Cristy %
% Anthony Thyssen %
% August 2007 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/color-private.h"
#include "magick/cache.h"
#include "magick/draw.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/log.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/pixel.h"
#include "magick/pixel-private.h"
#include "magick/quantum.h"
#include "magick/random_.h"
#include "magick/resample.h"
#include "magick/resize.h"
#include "magick/resize-private.h"
#include "magick/resource_.h"
#include "magick/transform.h"
#include "magick/signature-private.h"
#include "magick/token.h"
#include "magick/utility.h"
#include "magick/option.h"
/*
EWA Resampling Options
*/
/* select ONE resampling method */
#define EWA 1 /* Normal EWA handling - raw or clamped */
/* if 0 then use "High Quality EWA" */
#define EWA_CLAMP 1 /* EWA Clamping from Nicolas Robidoux */
#define FILTER_LUT 1 /* Use a LUT rather then direct filter calls */
/* output debugging information */
#define DEBUG_ELLIPSE 0 /* output ellipse info for debug */
#define DEBUG_HIT_MISS 0 /* output hit/miss pixels (as gnuplot commands) */
#define DEBUG_NO_PIXEL_HIT 0 /* Make pixels that fail to hit anything - RED */
/* WLUT_WIDTH is only used by the filter_lut[] cache (see the FILTER_LUT
   branches below), so guard it on FILTER_LUT.  The previous guard tested
   FILTER_DIRECT, a macro that is never defined anywhere in this file, so
   the condition was vacuously true. */
#if FILTER_LUT
#define WLUT_WIDTH 1024 /* size of the filter cache */
#endif
/*
Typedef declarations.
*/
/* Persistent state for elliptical-weighted-average (EWA) pixel resampling.
   Created by AcquireResampleFilter(), destroyed by DestroyResampleFilter(). */
struct _ResampleFilter
{
CacheView
*view;   /* cache view onto the image being resampled */
Image
*image;
ExceptionInfo
*exception;
MagickBooleanType
debug;
/* Information about image being resampled */
ssize_t
image_area;
InterpolatePixelMethod
interpolate;
VirtualPixelMethod
virtual_pixel;
FilterTypes
filter;
/* processing settings needed */
MagickBooleanType
limit_reached,    /* scaling limits hit: fall back to averaged result */
do_interpolate,   /* bypass EWA and use direct interpolation */
average_defined;  /* average_pixel has been computed and cached */
MagickPixelPacket
average_pixel;
/* current elliptical area being resampled around center point */
double
A, B, C,                          /* ellipse coefficients */
Vlimit, Ulimit, Uwidth, slope;    /* bounding parallelogram extents */
#if FILTER_LUT
/* LUT of weights for filtered average in elliptical area */
double
filter_lut[WLUT_WIDTH];
#else
/* Use a Direct call to the filter functions */
ResizeFilter
*filter_def;
double
F;
#endif
/* the practical working support of the filter */
double
support;
size_t
signature;  /* MagickCoreSignature while valid; inverted on destroy */
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e R e s a m p l e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireResampleFilter() initializes the information resample needs do to a
% scaled lookup of a color from an image, using area sampling.
%
% The algorithm is based on a Elliptical Weighted Average, where the pixels
% found in a large elliptical area is averaged together according to a
% weighting (filter) function. For more details see "Fundamentals of Texture
% Mapping and Image Warping" a master's thesis by Paul.S.Heckbert, June 17,
% 1989. Available for free from, http://www.cs.cmu.edu/~ph/
%
% As EWA resampling (or any sort of resampling) can require a lot of
% calculations to produce a distorted scaling of the source image for each
% output pixel, the ResampleFilter structure generated holds that information
% between individual image resampling.
%
% This function will make the appropriate AcquireVirtualCacheView() calls
% to view the image, calling functions do not need to open a cache view.
%
% Usage Example...
% resample_filter=AcquireResampleFilter(image,exception);
% SetResampleFilter(resample_filter, GaussianFilter, 1.0);
% for (y=0; y < (ssize_t) image->rows; y++) {
% for (x=0; x < (ssize_t) image->columns; x++) {
% u= ....; v= ....;
% ScaleResampleFilter(resample_filter, ... scaling vectors ...);
% (void) ResamplePixelColor(resample_filter,u,v,&pixel);
% ... assign resampled pixel value ...
% }
% }
% DestroyResampleFilter(resample_filter);
%
% The format of the AcquireResampleFilter method is:
%
% ResampleFilter *AcquireResampleFilter(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* Allocate and initialize a ResampleFilter for the given image: takes a
   reference on the image, opens a virtual cache view, and seeds the filter,
   interpolation and virtual-pixel settings from the image's own settings.
   Fatal (non-returning) on allocation failure. */
MagickExport ResampleFilter *AcquireResampleFilter(const Image *image,
ExceptionInfo *exception)
{
register ResampleFilter
*resample_filter;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
resample_filter=(ResampleFilter *) AcquireMagickMemory(
sizeof(*resample_filter));
if (resample_filter == (ResampleFilter *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) ResetMagickMemory(resample_filter,0,sizeof(*resample_filter));
resample_filter->exception=exception;
/* hold our own reference so the image outlives this filter */
resample_filter->image=ReferenceImage((Image *) image);
resample_filter->view=AcquireVirtualCacheView(resample_filter->image,exception);
resample_filter->debug=IsEventLogging();
resample_filter->signature=MagickCoreSignature;
resample_filter->image_area=(ssize_t) (image->columns*image->rows);
resample_filter->average_defined = MagickFalse;
/* initialise the resampling filter settings */
SetResampleFilter(resample_filter, image->filter, image->blur);
(void) SetResampleFilterInterpolateMethod(resample_filter,
image->interpolate);
(void) SetResampleFilterVirtualPixelMethod(resample_filter,
GetImageVirtualPixelMethod(image));
return(resample_filter);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y R e s a m p l e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyResampleFilter() finalizes and cleans up the resampling
% resample_filter as returned by AcquireResampleFilter(), freeing any memory
% or other information as needed.
%
% The format of the DestroyResampleFilter method is:
%
% ResampleFilter *DestroyResampleFilter(ResampleFilter *resample_filter)
%
% A description of each parameter follows:
%
% o resample_filter: resampling information structure
%
*/
/* Tear down a ResampleFilter: release the cache view, drop the image
   reference, invalidate the signature, and free the struct.
   Always returns NULL so callers can write: f = DestroyResampleFilter(f). */
MagickExport ResampleFilter *DestroyResampleFilter(
ResampleFilter *resample_filter)
{
assert(resample_filter != (ResampleFilter *) NULL);
assert(resample_filter->signature == MagickCoreSignature);
assert(resample_filter->image != (Image *) NULL);
if (resample_filter->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
resample_filter->image->filename);
resample_filter->view=DestroyCacheView(resample_filter->view);
resample_filter->image=DestroyImage(resample_filter->image);
#if ! FILTER_LUT
/* direct-call mode owns a ResizeFilter that must be freed too */
resample_filter->filter_def=DestroyResizeFilter(resample_filter->filter_def);
#endif
/* invert the signature so stale pointers trip the asserts above */
resample_filter->signature=(~MagickCoreSignature);
resample_filter=(ResampleFilter *) RelinquishMagickMemory(resample_filter);
return(resample_filter);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s a m p l e P i x e l C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResamplePixelColor() samples the pixel values surrounding the location
% given using an elliptical weighted average, at the scale previously
% calculated, and in the most efficient manner possible for the
% VirtualPixelMethod setting.
%
% The format of the ResamplePixelColor method is:
%
% MagickBooleanType ResamplePixelColor(ResampleFilter *resample_filter,
% const double u0,const double v0,MagickPixelPacket *pixel)
%
% A description of each parameter follows:
%
% o resample_filter: the resample filter.
%
% o u0,v0: A double representing the center of the area to resample,
% The distortion-transformed x,y coordinate.
%
% o pixel: the resampled pixel is returned here.
%
*/
MagickExport MagickBooleanType ResamplePixelColor(
ResampleFilter *resample_filter,const double u0,const double v0,
MagickPixelPacket *pixel)
{
MagickBooleanType
status;
ssize_t u,v, v1, v2, uw, hit;
double u1;
double U,V,Q,DQ,DDQ;
double divisor_c,divisor_m;
register double weight;
register const PixelPacket *pixels;
register const IndexPacket *indexes;
assert(resample_filter != (ResampleFilter *) NULL);
assert(resample_filter->signature == MagickCoreSignature);
status=MagickTrue;
/* GetMagickPixelPacket(resample_filter->image,pixel); */
if ( resample_filter->do_interpolate ) {
status=InterpolateMagickPixelPacket(resample_filter->image,
resample_filter->view,resample_filter->interpolate,u0,v0,pixel,
resample_filter->exception);
return(status);
}
#if DEBUG_ELLIPSE
(void) FormatLocaleFile(stderr, "u0=%lf; v0=%lf;\n", u0, v0);
#endif
/*
Does resample area Miss the image Proper?
If and that area a simple solid color - then simply return that color!
This saves a lot of calculation when resampling outside the bounds of
the source image.
However it probably should be expanded to image bounds plus the filters
scaled support size.
*/
hit = 0;
switch ( resample_filter->virtual_pixel ) {
case BackgroundVirtualPixelMethod:
case ConstantVirtualPixelMethod:
case TransparentVirtualPixelMethod:
case BlackVirtualPixelMethod:
case GrayVirtualPixelMethod:
case WhiteVirtualPixelMethod:
case MaskVirtualPixelMethod:
if ( resample_filter->limit_reached
|| u0 + resample_filter->Ulimit < 0.0
|| u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0
|| v0 + resample_filter->Vlimit < 0.0
|| v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0
)
hit++;
break;
case UndefinedVirtualPixelMethod:
case EdgeVirtualPixelMethod:
if ( ( u0 + resample_filter->Ulimit < 0.0 && v0 + resample_filter->Vlimit < 0.0 )
|| ( u0 + resample_filter->Ulimit < 0.0
&& v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0 )
|| ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0
&& v0 + resample_filter->Vlimit < 0.0 )
|| ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0
&& v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0 )
)
hit++;
break;
case HorizontalTileVirtualPixelMethod:
if ( v0 + resample_filter->Vlimit < 0.0
|| v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0
)
hit++; /* outside the horizontally tiled images. */
break;
case VerticalTileVirtualPixelMethod:
if ( u0 + resample_filter->Ulimit < 0.0
|| u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0
)
hit++; /* outside the vertically tiled images. */
break;
case DitherVirtualPixelMethod:
if ( ( u0 + resample_filter->Ulimit < -32.0 && v0 + resample_filter->Vlimit < -32.0 )
|| ( u0 + resample_filter->Ulimit < -32.0
&& v0 - resample_filter->Vlimit > (double) resample_filter->image->rows+31.0 )
|| ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns+31.0
&& v0 + resample_filter->Vlimit < -32.0 )
|| ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns+31.0
&& v0 - resample_filter->Vlimit > (double) resample_filter->image->rows+31.0 )
)
hit++;
break;
case TileVirtualPixelMethod:
case MirrorVirtualPixelMethod:
case RandomVirtualPixelMethod:
case HorizontalTileEdgeVirtualPixelMethod:
case VerticalTileEdgeVirtualPixelMethod:
case CheckerTileVirtualPixelMethod:
/* resampling of area is always needed - no VP limits */
break;
}
if ( hit ) {
/* The area being resampled is simply a solid color
* just return a single lookup color.
*
* Should this return the users requested interpolated color?
*/
status=InterpolateMagickPixelPacket(resample_filter->image,
resample_filter->view,IntegerInterpolatePixel,u0,v0,pixel,
resample_filter->exception);
return(status);
}
/*
When Scaling limits reached, return an 'averaged' result.
*/
if ( resample_filter->limit_reached ) {
switch ( resample_filter->virtual_pixel ) {
/* This is always handled by the above, so no need.
case BackgroundVirtualPixelMethod:
case ConstantVirtualPixelMethod:
case TransparentVirtualPixelMethod:
case GrayVirtualPixelMethod,
case WhiteVirtualPixelMethod
case MaskVirtualPixelMethod:
*/
case UndefinedVirtualPixelMethod:
case EdgeVirtualPixelMethod:
case DitherVirtualPixelMethod:
case HorizontalTileEdgeVirtualPixelMethod:
case VerticalTileEdgeVirtualPixelMethod:
/* We need an average edge pixel, from the correct edge!
How should I calculate an average edge color?
Just returning an averaged neighbourhood,
works well in general, but falls down for TileEdge methods.
This needs to be done properly!!!!!!
*/
status=InterpolateMagickPixelPacket(resample_filter->image,
resample_filter->view,AverageInterpolatePixel,u0,v0,pixel,
resample_filter->exception);
break;
case HorizontalTileVirtualPixelMethod:
case VerticalTileVirtualPixelMethod:
/* just return the background pixel - Is there a better way? */
status=InterpolateMagickPixelPacket(resample_filter->image,
resample_filter->view,IntegerInterpolatePixel,-1.0,-1.0,pixel,
resample_filter->exception);
break;
case TileVirtualPixelMethod:
case MirrorVirtualPixelMethod:
case RandomVirtualPixelMethod:
case CheckerTileVirtualPixelMethod:
default:
/* generate a average color of the WHOLE image */
if ( resample_filter->average_defined == MagickFalse ) {
Image
*average_image;
CacheView
*average_view;
GetMagickPixelPacket(resample_filter->image,(MagickPixelPacket *)
&resample_filter->average_pixel);
resample_filter->average_defined=MagickTrue;
/* Try to get an averaged pixel color of whole image */
average_image=ResizeImage(resample_filter->image,1,1,BoxFilter,1.0,
resample_filter->exception);
if (average_image == (Image *) NULL)
{
*pixel=resample_filter->average_pixel; /* FAILED */
break;
}
average_view=AcquireVirtualCacheView(average_image,
&average_image->exception);
pixels=(PixelPacket *)GetCacheViewVirtualPixels(average_view,0,0,1,1,
resample_filter->exception);
if (pixels == (const PixelPacket *) NULL) {
average_view=DestroyCacheView(average_view);
average_image=DestroyImage(average_image);
*pixel=resample_filter->average_pixel; /* FAILED */
break;
}
indexes=(IndexPacket *) GetCacheViewAuthenticIndexQueue(average_view);
SetMagickPixelPacket(resample_filter->image,pixels,indexes,
&(resample_filter->average_pixel));
average_view=DestroyCacheView(average_view);
average_image=DestroyImage(average_image);
if ( resample_filter->virtual_pixel == CheckerTileVirtualPixelMethod )
{
/* CheckerTile is a alpha blend of the image's average pixel
color and the current background color */
/* image's average pixel color */
weight = QuantumScale*((MagickRealType)(QuantumRange-
resample_filter->average_pixel.opacity));
resample_filter->average_pixel.red *= weight;
resample_filter->average_pixel.green *= weight;
resample_filter->average_pixel.blue *= weight;
divisor_c = weight;
/* background color */
weight = QuantumScale*((MagickRealType)(QuantumRange-
resample_filter->image->background_color.opacity));
resample_filter->average_pixel.red +=
weight*resample_filter->image->background_color.red;
resample_filter->average_pixel.green +=
weight*resample_filter->image->background_color.green;
resample_filter->average_pixel.blue +=
weight*resample_filter->image->background_color.blue;
resample_filter->average_pixel.opacity +=
resample_filter->image->background_color.opacity;
divisor_c += weight;
/* alpha blend */
resample_filter->average_pixel.red /= divisor_c;
resample_filter->average_pixel.green /= divisor_c;
resample_filter->average_pixel.blue /= divisor_c;
resample_filter->average_pixel.opacity /= 2; /* 50% blend */
}
}
*pixel=resample_filter->average_pixel;
break;
}
return(status);
}
/*
Initialize weighted average data collection
*/
hit = 0;
divisor_c = 0.0;
divisor_m = 0.0;
pixel->red = pixel->green = pixel->blue = 0.0;
if (pixel->matte != MagickFalse) pixel->opacity = 0.0;
if (pixel->colorspace == CMYKColorspace) pixel->index = 0.0;
/*
Determine the parallelogram bounding box fitted to the ellipse
centered at u0,v0. This area is bounding by the lines...
*/
v1 = (ssize_t)ceil(v0 - resample_filter->Vlimit); /* range of scan lines */
v2 = (ssize_t)floor(v0 + resample_filter->Vlimit);
/* scan line start and width accross the parallelogram */
u1 = u0 + (v1-v0)*resample_filter->slope - resample_filter->Uwidth;
uw = (ssize_t)(2.0*resample_filter->Uwidth)+1;
#if DEBUG_ELLIPSE
(void) FormatLocaleFile(stderr, "v1=%ld; v2=%ld\n", (long)v1, (long)v2);
(void) FormatLocaleFile(stderr, "u1=%ld; uw=%ld\n", (long)u1, (long)uw);
#else
# define DEBUG_HIT_MISS 0 /* only valid if DEBUG_ELLIPSE is enabled */
#endif
/*
Do weighted resampling of all pixels, within the scaled ellipse,
bound by a Parellelogram fitted to the ellipse.
*/
DDQ = 2*resample_filter->A;
for( v=v1; v<=v2; v++ ) {
#if DEBUG_HIT_MISS
long uu = ceil(u1); /* actual pixel location (for debug only) */
(void) FormatLocaleFile(stderr, "# scan line from pixel %ld, %ld\n", (long)uu, (long)v);
#endif
u = (ssize_t)ceil(u1); /* first pixel in scanline */
u1 += resample_filter->slope; /* start of next scan line */
/* location of this first pixel, relative to u0,v0 */
U = (double)u-u0;
V = (double)v-v0;
/* Q = ellipse quotent ( if Q<F then pixel is inside ellipse) */
Q = (resample_filter->A*U + resample_filter->B*V)*U + resample_filter->C*V*V;
DQ = resample_filter->A*(2.0*U+1) + resample_filter->B*V;
/* get the scanline of pixels for this v */
pixels=GetCacheViewVirtualPixels(resample_filter->view,u,v,(size_t) uw,
1,resample_filter->exception);
if (pixels == (const PixelPacket *) NULL)
return(MagickFalse);
indexes=GetCacheViewVirtualIndexQueue(resample_filter->view);
/* count up the weighted pixel colors */
for( u=0; u<uw; u++ ) {
weight = 0;
#if FILTER_LUT
/* Note that the ellipse has been pre-scaled so F = WLUT_WIDTH */
if ( Q < (double)WLUT_WIDTH ) {
weight = resample_filter->filter_lut[(int)Q];
#else
/* Note that the ellipse has been pre-scaled so F = support^2 */
if ( Q < (double)resample_filter->F ) {
weight = GetResizeFilterWeight(resample_filter->filter_def,
sqrt(Q)); /* a SquareRoot! Arrggghhhhh... */
#endif
if (pixel->matte != MagickFalse)
pixel->opacity += weight*pixels->opacity;
divisor_m += weight;
if (pixel->matte != MagickFalse)
weight *= QuantumScale*((MagickRealType)(QuantumRange-pixels->opacity));
pixel->red += weight*pixels->red;
pixel->green += weight*pixels->green;
pixel->blue += weight*pixels->blue;
if (pixel->colorspace == CMYKColorspace)
pixel->index += weight*(*indexes);
divisor_c += weight;
hit++;
#if DEBUG_HIT_MISS
/* mark the pixel according to hit/miss of the ellipse */
(void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 3\n",
(long)uu-.1,(double)v-.1,(long)uu+.1,(long)v+.1);
(void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 3\n",
(long)uu+.1,(double)v-.1,(long)uu-.1,(long)v+.1);
} else {
(void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 1\n",
(long)uu-.1,(double)v-.1,(long)uu+.1,(long)v+.1);
(void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 1\n",
(long)uu+.1,(double)v-.1,(long)uu-.1,(long)v+.1);
}
uu++;
#else
}
#endif
pixels++;
indexes++;
Q += DQ;
DQ += DDQ;
}
}
#if DEBUG_ELLIPSE
(void) FormatLocaleFile(stderr, "Hit=%ld; Total=%ld;\n", (long)hit, (long)uw*(v2-v1) );
#endif
/*
Result sanity check -- this should NOT happen
*/
if ( hit == 0 || divisor_m <= MagickEpsilon || divisor_c <= MagickEpsilon ) {
/* not enough pixels, or bad weighting in resampling,
resort to direct interpolation */
#if DEBUG_NO_PIXEL_HIT
pixel->opacity = pixel->red = pixel->green = pixel->blue = 0;
pixel->red = QuantumRange; /* show pixels for which EWA fails */
#else
status=InterpolateMagickPixelPacket(resample_filter->image,
resample_filter->view,resample_filter->interpolate,u0,v0,pixel,
resample_filter->exception);
#endif
return status;
}
/*
Finialize results of resampling
*/
divisor_m = 1.0/divisor_m;
if (pixel->matte != MagickFalse)
pixel->opacity = (MagickRealType) ClampToQuantum(divisor_m*pixel->opacity);
divisor_c = 1.0/divisor_c;
pixel->red = (MagickRealType) ClampToQuantum(divisor_c*pixel->red);
pixel->green = (MagickRealType) ClampToQuantum(divisor_c*pixel->green);
pixel->blue = (MagickRealType) ClampToQuantum(divisor_c*pixel->blue);
if (pixel->colorspace == CMYKColorspace)
pixel->index = (MagickRealType) ClampToQuantum(divisor_c*pixel->index);
return(MagickTrue);
}
#if EWA && EWA_CLAMP
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
- C l a m p U p A x e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClampUpAxes() function converts the input vectors into a major and
% minor axis unit vectors, and their magnitude. This allows us to
% ensure that the ellipse generated is never smaller than the unit
% circle and thus never too small for use in EWA resampling.
%
% This purely mathematical 'magic' was provided by Professor Nicolas
% Robidoux and his Masters student Chantal Racette.
%
% Reference: "We Recommend Singular Value Decomposition", David Austin
% http://www.ams.org/samplings/feature-column/fcarc-svd
%
% By generating major and minor axis vectors, we can actually use the
% ellipse in its "canonical form", by remapping the dx,dy of the
% sampled point into distances along the major and minor axis unit
% vectors.
%
% Reference: http://en.wikipedia.org/wiki/Ellipse#Canonical_form
*/
static inline void ClampUpAxes(const double dux,
                               const double dvx,
                               const double duy,
                               const double dvy,
                               double *major_mag,
                               double *minor_mag,
                               double *major_unit_x,
                               double *major_unit_y,
                               double *minor_unit_x,
                               double *minor_unit_y)
{
  /*
    Given the inverse Jacobian of the distortion,

        Jinv = [ dux duy ]
               [ dvx dvy ]

    compute the singular values and the left singular vectors of Jinv,
    clamp the singular values up so they are never smaller than 1, and
    return the clamped half-axis lengths (major_mag, minor_mag) together
    with the unit major and minor axis direction vectors of the EWA
    sampling ellipse.  The resulting ellipse is the smallest one that
    contains both the unit disk and the image of the unit disk under
    Jinv, which guarantees the ellipse is never too small for EWA
    resampling (it always covers at least one pixel).

    The SVD is obtained through the normal matrix N = Jinv * Jinv^T:
    the eigenvalues of N are the squared singular values of Jinv and
    its eigenvectors are the left singular vectors.

    Method by Nicolas Robidoux and Chantal Racette (Laurentian
    University), with suggestions from Anthony Thyssen; the clamping
    idea follows Andreas Gustafsson's "Interactive Image Warping"
    (Section 3.6) and Craig DeForest's PDL::Transform.  Background
    reading: "We Recommend Singular Value Decomposition", David Austin,
    http://www.ams.org/samplings/feature-column/fcarc-svd

    Note: to reconstitute clamped tangent vectors from the outputs use
        newdux = major_mag * major_unit_x
        newdvx = major_mag * major_unit_y
        newduy = minor_mag * minor_unit_x
        newdvy = minor_mag * minor_unit_y
  */
  const double j00 = dux;  /* Jinv, row by row */
  const double j01 = duy;
  const double j10 = dvx;
  const double j11 = dvy;
  /*
    Entries of the (symmetric) normal matrix N = Jinv * Jinv^T.
  */
  const double n11 = j00*j00+j01*j01;
  const double n12 = j00*j10+j01*j11;
  const double n22 = j10*j10+j11*j11;
  const double determinant = j00*j11-j01*j10;
  const double twice_determinant = determinant+determinant;
  const double frobenius_sq = n11+n22;  /* trace of N */
  /*
    Discriminant of the characteristic quadratic of N.  In exact
    arithmetic it cannot be negative, but the normal-matrix approach is
    badly conditioned, so floating point may drive it slightly below
    zero; clamp it at zero before taking the square root.
  */
  const double raw_discriminant =
    (frobenius_sq+twice_determinant)*(frobenius_sq-twice_determinant);
  const double sqrt_discriminant =
    sqrt(raw_discriminant > 0.0 ? raw_discriminant : 0.0);
  /*
    Squared singular values of Jinv; s1 >= s2.  The reciprocal of s1 is
    the smallest singular value of the Jacobian itself, and vice versa.
  */
  const double s1_sq = 0.5*(frobenius_sq+sqrt_discriminant);
  const double s2_sq = 0.5*(frobenius_sq-sqrt_discriminant);
  /*
    The first left singular vector is an eigenvector of N for the
    eigenvalue s1^2.  Select the larger row of N - s1^2 I for numerical
    stability.  If both rows vanish, N is a multiple of the identity,
    any direction is an eigenvector, and the norm below is zero; the
    fallback vector [1,0] then safely covers all degenerate cases.
  */
  const double row1 = s1_sq-n11;
  const double row2 = s1_sq-n22;
  const double row1_sq = row1*row1;
  const double row2_sq = row2*row2;
  const double vec_x = ( (row1_sq>=row2_sq) ? n12 : row2 );
  const double vec_y = ( (row1_sq>=row2_sq) ? row1 : n12 );
  const double vec_norm = sqrt(vec_x*vec_x+vec_y*vec_y);
  /*
    Normalized first left singular vector (major axis direction).
  */
  const double u11 = ( (vec_norm>0.0) ? vec_x/vec_norm : 1.0 );
  const double u21 = ( (vec_norm>0.0) ? vec_y/vec_norm : 0.0 );
  /*
    Clamp the singular values up to 1 so the ellipse always contains
    the unit disk.
  */
  *major_mag = ( (s1_sq<=1.0) ? 1.0 : sqrt(s1_sq) );
  *minor_mag = ( (s2_sq<=1.0) ? 1.0 : sqrt(s2_sq) );
  /*
    The minor axis is always the major axis rotated 90 degrees
    counterclockwise.
  */
  *major_unit_x = u11;
  *major_unit_y = u21;
  *minor_unit_x = -u21;
  *minor_unit_y = u11;
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e R e s a m p l e F i l t e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleResampleFilter() does all the calculations needed to resample an image
% at a specific scale, defined by two scaling vectors. This not using
% a orthogonal scaling, but two distorted scaling vectors, to allow the
% generation of a angled ellipse.
%
% As only two derivative scaling vectors are used, the center of the ellipse
% must be the center of the lookup. That is, any curvature that the
% distortion may produce is discounted.
%
% The input vectors are produced by either finding the derivatives of the
% distortion function, or the partial derivatives from a distortion mapping.
% They do not need to be the orthogonal dx,dy scaling vectors, but can be
% calculated from other derivatives. For example you could use dr,da/r
% polar coordinate vector scaling vectors.
%
% If u,v = DistortEquation(x,y) OR u = Fu(x,y); v = Fv(x,y)
% Then the scaling vectors are determined from the derivatives...
% du/dx, dv/dx and du/dy, dv/dy
% If the resulting scaling vectors are orthogonally aligned then...
% dv/dx = 0 and du/dy = 0
% Producing an orthogonally aligned ellipse in source space for the area to
% be resampled.
%
% Note that scaling vectors differ from argument order. Argument order
% is the general order the derivatives are extracted from the distortion
% equations, and not the scaling vectors. As such the middle two values
% may be swapped from what you expect. Caution is advised.
%
% WARNING: It is assumed that any SetResampleFilter() method call will
% always be performed before the ScaleResampleFilter() method, so that the
% size of the ellipse will match the support for the resampling filter being
% used.
%
% The format of the ScaleResampleFilter method is:
%
% void ScaleResampleFilter(const ResampleFilter *resample_filter,
% const double dux,const double duy,const double dvx,const double dvy)
%
% A description of each parameter follows:
%
% o resample_filter: the resampling information defining the
% image being resampled
%
% o dux,duy,dvx,dvy:
% The deritives or scaling vectors defining the EWA ellipse.
% NOTE: watch the order, which is based on the order deritives
% are usally determined from distortion equations (see above).
% The middle two values may need to be swapped if you are thinking
% in terms of scaling vectors.
%
*/
MagickExport void ScaleResampleFilter(ResampleFilter *resample_filter,
const double dux,const double duy,const double dvx,const double dvy)
{
double A,B,C,F;
assert(resample_filter != (ResampleFilter *) NULL);
assert(resample_filter->signature == MagickCoreSignature);
resample_filter->limit_reached = MagickFalse;
/* A 'point' filter forces use of interpolation instead of area sampling */
if ( resample_filter->filter == PointFilter )
return; /* EWA turned off - nothing to do */
#if DEBUG_ELLIPSE
(void) FormatLocaleFile(stderr, "# -----\n" );
(void) FormatLocaleFile(stderr, "dux=%lf; dvx=%lf; duy=%lf; dvy=%lf;\n",
dux, dvx, duy, dvy);
#endif
/* Find the ellipse coefficients such that
A*u^2 + B*u*v + C*v^2 = F
with u,v relative to the point around which we are resampling,
and the given scaling dx,dy vectors in u,v space:
du/dx,dv/dx and du/dy,dv/dy
*/
#if EWA
/* Direct conversion of derivatives into elliptical coefficients.
However when magnifying images, the scaling vectors will be small,
resulting in an ellipse that is too small to sample properly.
As such we need to clamp the major/minor axes to a minimum of 1.0
to prevent it getting too small.
*/
#if EWA_CLAMP
{ double major_mag,
minor_mag,
major_x,
major_y,
minor_x,
minor_y;
/* SVD-based clamping (see ClampUpAxes) guarantees the sampling ellipse
never shrinks below the unit pixel circle. */
ClampUpAxes(dux,dvx,duy,dvy, &major_mag, &minor_mag,
&major_x, &major_y, &minor_x, &minor_y);
major_x *= major_mag; major_y *= major_mag;
minor_x *= minor_mag; minor_y *= minor_mag;
#if DEBUG_ELLIPSE
(void) FormatLocaleFile(stderr, "major_x=%lf; major_y=%lf; minor_x=%lf; minor_y=%lf;\n",
major_x, major_y, minor_x, minor_y);
#endif
A = major_y*major_y+minor_y*minor_y;
B = -2.0*(major_x*major_y+minor_x*minor_y);
C = major_x*major_x+minor_x*minor_x;
/* F is the squared area factor of the (clamped) ellipse */
F = major_mag*minor_mag;
F *= F; /* square it */
}
#else /* raw unclamped EWA */
A = dvx*dvx+dvy*dvy;
B = -2.0*(dux*dvx+duy*dvy);
C = dux*dux+duy*duy;
F = dux*dvy-duy*dvx; /* determinant of the derivative matrix */
F *= F; /* square it */
#endif /* EWA_CLAMP */
#else /* HQ_EWA */
/*
This is Paul Heckbert's "Higher Quality EWA" formula, from page 60 of his
thesis, which adds a unit circle to the elliptical area so as to do both
reconstruction and prefiltering of the pixels in the resampling. It also
means it is always likely to have at least 4 pixels within the area of the
ellipse, for weighted averaging. No scaling will result with F == 4.0 and
a circle of radius 2.0, and F smaller than this means magnification is
being used.
NOTE: This method produces a very blurry result at near unity scale while
producing perfect results for strong minification and magnifications.
However filter support is fixed to 2.0 (no good for Windowed Sinc filters)
*/
A = dvx*dvx+dvy*dvy+1;
B = -2.0*(dux*dvx+duy*dvy);
C = dux*dux+duy*duy+1;
F = A*C - B*B/4;
#endif
#if DEBUG_ELLIPSE
(void) FormatLocaleFile(stderr, "A=%lf; B=%lf; C=%lf; F=%lf\n", A,B,C,F);
/* Figure out various information directly about the ellipse.
This information is not needed at this time, but may be
needed later for better limit determination.
It is also good to have as a record for future debugging.
*/
{ double alpha, beta, gamma, Major, Minor;
double Eccentricity, Ellipse_Area, Ellipse_Angle;
alpha = A+C;
beta = A-C;
gamma = sqrt(beta*beta + B*B );
if ( alpha - gamma <= MagickEpsilon )
Major= MagickMaximumValue;
else
Major= sqrt(2*F/(alpha - gamma));
Minor = sqrt(2*F/(alpha + gamma));
(void) FormatLocaleFile(stderr, "# Major=%lf; Minor=%lf\n", Major, Minor );
/* other information about the ellipse includes... */
Eccentricity = Major/Minor;
Ellipse_Area = MagickPI*Major*Minor;
Ellipse_Angle = atan2(B, A-C);
(void) FormatLocaleFile(stderr, "# Angle=%lf Area=%lf\n",
(double) RadiansToDegrees(Ellipse_Angle), Ellipse_Area);
}
#endif
/* If one or both of the scaling vectors is impossibly large
(producing a very large raw F value), we may as well not bother
doing any form of resampling since the resampled area is very large.
In this case some alternative means of pixel sampling, such as
the average of the whole image, is needed to get a reasonable
result. Calculate only as needed.
*/
if ( (4*A*C - B*B) > MagickMaximumValue ) {
resample_filter->limit_reached = MagickTrue;
return;
}
/* Scale the ellipse to match the filter's support
(that is, multiply F by the square of the support).
Simpler to just multiply it by the support twice!
*/
F *= resample_filter->support;
F *= resample_filter->support;
/* Orthogonal bounds of the ellipse */
resample_filter->Ulimit = sqrt(C*F/(A*C-0.25*B*B));
resample_filter->Vlimit = sqrt(A*F/(A*C-0.25*B*B));
/* Horizontally aligned parallelogram fitted to the ellipse */
resample_filter->Uwidth = sqrt(F/A); /* Half of the parallelogram width */
resample_filter->slope = -B/(2.0*A); /* Reciprocal slope of the parallelogram */
#if DEBUG_ELLIPSE
(void) FormatLocaleFile(stderr, "Ulimit=%lf; Vlimit=%lf; UWidth=%lf; Slope=%lf;\n",
resample_filter->Ulimit, resample_filter->Vlimit,
resample_filter->Uwidth, resample_filter->slope );
#endif
/* Check the absolute area of the parallelogram involved.
* This limit needs more work, as it is too slow for larger images
* with tiled views of the horizon.
*/
if ( (resample_filter->Uwidth * resample_filter->Vlimit)
> (4.0*resample_filter->image_area)) {
resample_filter->limit_reached = MagickTrue;
return;
}
/* Scale the ellipse formula to directly index the filter lookup table:
divide out F so a weight lookup needs no extra normalization. */
{ register double scale;
#if FILTER_LUT
/* scale so that F = WLUT_WIDTH; -- hardcoded */
scale = (double)WLUT_WIDTH/F;
#else
/* scale so that F = resample_filter->F (support^2) */
scale = resample_filter->F/F;
#endif
resample_filter->A = A*scale;
resample_filter->B = B*scale;
resample_filter->C = C*scale;
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t R e s a m p l e F i l t e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetResampleFilter() set the resampling filter lookup table based on a
% specific filter. Note that the filter is used as a radial filter not as a
% two pass othogonally aligned resampling filter.
%
% The format of the SetResampleFilter method is:
%
% void SetResampleFilter(ResampleFilter *resample_filter,
% const FilterTypes filter,const double blur)
%
% A description of each parameter follows:
%
% o resample_filter: the resampling information structure
%
% o filter: the resize filter for elliptical weighting LUT
%
% o blur: filter blur factor (radial scaling) for elliptical weighting LUT
%
*/
MagickExport void SetResampleFilter(ResampleFilter *resample_filter,
const FilterTypes filter,const double blur)
{
ResizeFilter
*resize_filter;
assert(resample_filter != (ResampleFilter *) NULL);
assert(resample_filter->signature == MagickCoreSignature);
resample_filter->do_interpolate = MagickFalse;
resample_filter->filter = filter;
/* Default cylindrical filter is a Cubic Keys filter (Robidoux) */
if ( filter == UndefinedFilter )
resample_filter->filter = RobidouxFilter;
if ( resample_filter->filter == PointFilter ) {
resample_filter->do_interpolate = MagickTrue;
return; /* EWA turned off - nothing more to do */
}
/* Acquire the cylindrical (radial) resize filter used for weighting */
resize_filter = AcquireResizeFilter(resample_filter->image,
resample_filter->filter,blur,MagickTrue,resample_filter->exception);
if (resize_filter == (ResizeFilter *) NULL) {
(void) ThrowMagickException(resample_filter->exception,GetMagickModule(),
ModuleError, "UnableToSetFilteringValue",
"Fall back to Interpolated 'Point' filter");
resample_filter->filter = PointFilter;
resample_filter->do_interpolate = MagickTrue;
return; /* EWA turned off - nothing more to do */
}
/* Get the practical working support for the filter,
* after any API call blur factors have been accounted for.
*/
#if EWA
resample_filter->support = GetResizeFilterSupport(resize_filter);
#else
resample_filter->support = 2.0; /* fixed support size for HQ-EWA */
#endif
#if FILTER_LUT
/* Fill the LUT with the weights from the selected filter function.
The LUT is indexed by squared distance, hence the sqrt() below. */
{ register int
Q;
double
r_scale;
/* Scale radius so the filter LUT covers the full support range */
r_scale = resample_filter->support*sqrt(1.0/(double)WLUT_WIDTH);
for(Q=0; Q<WLUT_WIDTH; Q++)
resample_filter->filter_lut[Q] = (double)
GetResizeFilterWeight(resize_filter,sqrt((double)Q)*r_scale);
/* finished with the resize filter */
resize_filter = DestroyResizeFilter(resize_filter);
}
#else
/* save the filter and the scaled ellipse bound needed for the filter */
resample_filter->filter_def = resize_filter;
resample_filter->F = resample_filter->support*resample_filter->support;
#endif
/*
Adjust the scaling of the default unit circle.
This assumes that any real scaling changes will always
take place AFTER the filter method has been initialized.
*/
ScaleResampleFilter(resample_filter, 1.0, 0.0, 0.0, 1.0);
#if 0
/*
This is old code kept as a reference only. Basically it generates
a Gaussian bell curve, with sigma = 0.5 if the support is 2.0.
Create Normal Gaussian 2D Filter Weighted Lookup Table.
A normal EWA gaussian lookup would use exp(Q*ALPHA)
where Q = distance squared from 0.0 (center) to 1.0 (edge)
and ALPHA = -4.0*ln(2.0) ==> -2.77258872223978123767
The table is of length 1024, and equates to a support radius of 2.0,
thus needs to be scaled by ALPHA*4/1024 and any blur factor squared.
It comes from reference code provided by Fred Weinhaus.
*/
r_scale = -2.77258872223978123767/(WLUT_WIDTH*blur*blur);
for(Q=0; Q<WLUT_WIDTH; Q++)
resample_filter->filter_lut[Q] = exp((double)Q*r_scale);
resample_filter->support = WLUT_WIDTH;
#endif
#if FILTER_LUT
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp single
#endif
{
if (IsMagickTrue(GetImageArtifact(resample_filter->image,
"resample:verbose")) )
{
register int
Q;
double
r_scale;
/* Debug output of the filter weighting LUT.
Gnuplot the LUT data; the x scale index has been adjusted:
plot [0:2][-.2:1] "lut.dat" with lines
The filter values should be normalized for comparison.
*/
printf("#\n");
printf("# Resampling Filter LUT (%d values) for '%s' filter\n",
WLUT_WIDTH, CommandOptionToMnemonic(MagickFilterOptions,
resample_filter->filter) );
printf("#\n");
printf("# Note: values in table are using a squared radius lookup.\n");
printf("# As such its distribution is not uniform.\n");
printf("#\n");
printf("# The X value is the support distance for the Y weight\n");
printf("# so you can use gnuplot to plot this cylindrical filter\n");
printf("# plot [0:2][-.2:1] \"lut.dat\" with lines\n");
printf("#\n");
/* Scale radius so the filter LUT covers the full support range */
r_scale = resample_filter->support*sqrt(1.0/(double)WLUT_WIDTH);
for(Q=0; Q<WLUT_WIDTH; Q++)
printf("%8.*g %.*g\n",
GetMagickPrecision(),sqrt((double)Q)*r_scale,
GetMagickPrecision(),resample_filter->filter_lut[Q] );
printf("\n\n"); /* generate a 'break' in gnuplot if multiple outputs */
}
/* Output the above once only for each image, and each setting
(void) DeleteImageArtifact(resample_filter->image,"resample:verbose");
*/
}
#endif /* FILTER_LUT */
return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t R e s a m p l e F i l t e r I n t e r p o l a t e M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetResampleFilterInterpolateMethod() sets the resample filter interpolation
% method.
%
% The format of the SetResampleFilterInterpolateMethod method is:
%
% MagickBooleanType SetResampleFilterInterpolateMethod(
% ResampleFilter *resample_filter,const InterpolateMethod method)
%
% A description of each parameter follows:
%
% o resample_filter: the resample filter.
%
% o method: the interpolation method.
%
*/
MagickExport MagickBooleanType SetResampleFilterInterpolateMethod(
ResampleFilter *resample_filter,const InterpolatePixelMethod method)
{
/* Validate the resample filter handle before touching it. */
assert(resample_filter != (ResampleFilter *) NULL);
assert(resample_filter->signature == MagickCoreSignature);
assert(resample_filter->image != (Image *) NULL);
if (resample_filter->debug != MagickFalse)
  {
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      resample_filter->image->filename);
  }
/* Record the interpolation method used when EWA sampling is bypassed. */
resample_filter->interpolate = method;
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t R e s a m p l e F i l t e r V i r t u a l P i x e l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetResampleFilterVirtualPixelMethod() changes the virtual pixel method
% associated with the specified resample filter.
%
% The format of the SetResampleFilterVirtualPixelMethod method is:
%
% MagickBooleanType SetResampleFilterVirtualPixelMethod(
% ResampleFilter *resample_filter,const VirtualPixelMethod method)
%
% A description of each parameter follows:
%
% o resample_filter: the resample filter.
%
% o method: the virtual pixel method.
%
*/
MagickExport MagickBooleanType SetResampleFilterVirtualPixelMethod(
ResampleFilter *resample_filter,const VirtualPixelMethod method)
{
/* Validate the resample filter handle before touching it. */
assert(resample_filter != (ResampleFilter *) NULL);
assert(resample_filter->signature == MagickCoreSignature);
assert(resample_filter->image != (Image *) NULL);
if (resample_filter->debug != MagickFalse)
  {
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      resample_filter->image->filename);
  }
/* Record the method, and push it down to the cache view unless it is
   being reset to the undefined state. */
resample_filter->virtual_pixel = method;
if (method != UndefinedVirtualPixelMethod)
  (void) SetCacheViewVirtualPixelMethod(resample_filter->view,method);
return(MagickTrue);
}
|
feature.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% FFFFF EEEEE AAA TTTTT U U RRRR EEEEE %
% F E A A T U U R R E %
% FFF EEE AAAAA T U U RRRR EEE %
% F E A A T U U R R E %
% F EEEEE A A T UUU R R EEEEE %
% %
% %
% MagickCore Image Feature Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/animate.h"
#include "magick/artifact.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/cache-view.h"
#include "magick/client.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/compress.h"
#include "magick/constitute.h"
#include "magick/deprecate.h"
#include "magick/display.h"
#include "magick/draw.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/feature.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/list.h"
#include "magick/image-private.h"
#include "magick/magic.h"
#include "magick/magick.h"
#include "magick/matrix.h"
#include "magick/memory_.h"
#include "magick/module.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/morphology-private.h"
#include "magick/option.h"
#include "magick/paint.h"
#include "magick/pixel-private.h"
#include "magick/profile.h"
#include "magick/property.h"
#include "magick/quantize.h"
#include "magick/random_.h"
#include "magick/resource_.h"
#include "magick/segment.h"
#include "magick/semaphore.h"
#include "magick/signature-private.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/timer.h"
#include "magick/token.h"
#include "magick/utility.h"
#include "magick/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C a n n y E d g e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CannyEdgeImage() uses a multi-stage algorithm to detect a wide range of
% edges in images.
%
% The format of the CannyEdgeImage method is:
%
% Image *CannyEdgeImage(const Image *image,const double radius,
% const double sigma,const double lower_percent,
% const double upper_percent,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the gaussian smoothing filter.
%
% o sigma: the sigma of the gaussian smoothing filter.
%
% o lower_percent: percentage of edge pixels in the lower threshold.
%
% o upper_percent: percentage of edge pixels in the upper threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Per-pixel working state for the Canny edge detector.
*/
typedef struct _CannyInfo
{
  double
    magnitude,    /* gradient magnitude, hypot(dx,dy) */
    intensity;    /* magnitude surviving non-maxima suppression (0 if pruned) */

  int
    orientation;  /* quantized gradient direction (0..3); selects which
                     neighbor pair non-maxima suppression compares against */

  ssize_t
    x,
    y;            /* pixel coordinates when the entry doubles as an
                     edge-stack element inside TraceEdges() */
} CannyInfo;
/*
  IsAuthenticPixel() returns MagickTrue when the coordinate (x,y) lies
  inside the image raster, MagickFalse otherwise.
*/
static inline MagickBooleanType IsAuthenticPixel(const Image *image,
  const ssize_t x,const ssize_t y)
{
  if ((x >= 0) && (x < (ssize_t) image->columns) && (y >= 0) &&
      (y < (ssize_t) image->rows))
    return(MagickTrue);
  return(MagickFalse);
}
/*
  TraceEdges() performs the hysteresis stage of the Canny detector: starting
  from the strong-edge seed at (x,y) it marks that pixel white, then follows
  8-connected neighbors whose suppressed gradient intensity is at least
  lower_threshold, marking each of those as an edge too.  The canny_cache
  matrix doubles as a work stack: edge coordinates are written into column 0
  and popped until the stack drains.  Returns MagickFalse on any cache or
  matrix failure.
*/
static MagickBooleanType TraceEdges(Image *edge_image,CacheView *edge_view,
  MatrixInfo *canny_cache,const ssize_t x,const ssize_t y,
  const double lower_threshold,ExceptionInfo *exception)
{
  CannyInfo
    edge,
    pixel;

  MagickBooleanType
    status;

  register PixelPacket
    *q;

  register ssize_t
    i;

  /*
    Mark the seed pixel as an edge (white).
  */
  q=GetCacheViewAuthenticPixels(edge_view,x,y,1,1,exception);
  if (q == (PixelPacket *) NULL)
    return(MagickFalse);
  q->red=QuantumRange;
  q->green=QuantumRange;
  q->blue=QuantumRange;
  status=SyncCacheViewAuthenticPixels(edge_view,exception);
  if (status == MagickFalse)
    return(MagickFalse);
  /*
    Push the seed coordinate onto the stack kept in matrix column 0.
    NOTE(review): the CannyInfo entries at (i,0) are overwritten here, so
    the original gradient data in that column is clobbered by design.
  */
  if (GetMatrixElement(canny_cache,0,0,&edge) == MagickFalse)
    return(MagickFalse);
  edge.x=x;
  edge.y=y;
  if (SetMatrixElement(canny_cache,0,0,&edge) == MagickFalse)
    return(MagickFalse);
  for (i=1; i != 0; )
  {
    ssize_t
      v;

    /*
      Pop the next edge coordinate off the stack.
    */
    i--;
    status=GetMatrixElement(canny_cache,i,0,&edge);
    if (status == MagickFalse)
      return(MagickFalse);
    for (v=(-1); v <= 1; v++)
    {
      ssize_t
        u;

      for (u=(-1); u <= 1; u++)
      {
        if ((u == 0) && (v == 0))
          continue;
        if (IsAuthenticPixel(edge_image,edge.x+u,edge.y+v) == MagickFalse)
          continue;
        /*
          Not an edge if gradient value is below the lower threshold.
        */
        q=GetCacheViewAuthenticPixels(edge_view,edge.x+u,edge.y+v,1,1,
          exception);
        if (q == (PixelPacket *) NULL)
          return(MagickFalse);
        status=GetMatrixElement(canny_cache,edge.x+u,edge.y+v,&pixel);
        if (status == MagickFalse)
          return(MagickFalse);
        /*
          A still-black neighbor whose suppressed intensity meets the lower
          threshold becomes an edge and is pushed for later expansion.
        */
        if ((GetPixelIntensity(edge_image,q) == 0.0) &&
            (pixel.intensity >= lower_threshold))
          {
            q->red=QuantumRange;
            q->green=QuantumRange;
            q->blue=QuantumRange;
            status=SyncCacheViewAuthenticPixels(edge_view,exception);
            if (status == MagickFalse)
              return(MagickFalse);
            /*
              NOTE(review): edge.x/edge.y are advanced in place, so the
              remaining u/v offsets of this iteration are evaluated relative
              to the newly pushed pixel rather than the popped one — confirm
              this matches upstream intent before changing.
            */
            edge.x+=u;
            edge.y+=v;
            status=SetMatrixElement(canny_cache,i,0,&edge);
            if (status == MagickFalse)
              return(MagickFalse);
            i++;
          }
      }
    }
  }
  return(MagickTrue);
}
/*
  CannyEdgeImage() pipeline: (1) Gaussian blur to suppress noise, (2) 2x2
  gradient estimation with orientation quantization, (3) non-maxima
  suppression, (4) double (hysteresis) thresholding via TraceEdges().
*/
MagickExport Image *CannyEdgeImage(const Image *image,const double radius,
  const double sigma,const double lower_percent,const double upper_percent,
  ExceptionInfo *exception)
{
#define CannyEdgeImageTag "CannyEdge/Image"

  CacheView
    *edge_view;

  CannyInfo
    pixel;

  char
    geometry[MaxTextExtent];

  double
    lower_threshold,
    max,
    min,
    upper_threshold;

  Image
    *edge_image;

  KernelInfo
    *kernel_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MatrixInfo
    *canny_cache;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /*
    Filter out noise.
  */
  (void) FormatLocaleString(geometry,MaxTextExtent,
    "blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma);
  kernel_info=AcquireKernelInfo(geometry);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  edge_image=MorphologyApply(image,DefaultChannels,ConvolveMorphology,1,
    kernel_info,UndefinedCompositeOp,0.0,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  if (edge_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageColorspace(edge_image,GRAYColorspace) == MagickFalse)
    {
      edge_image=DestroyImage(edge_image);
      return((Image *) NULL);
    }
  /*
    Find the intensity gradient of the image.  One CannyInfo record is kept
    per pixel in canny_cache.
  */
  canny_cache=AcquireMatrixInfo(edge_image->columns,edge_image->rows,
    sizeof(CannyInfo),exception);
  if (canny_cache == (MatrixInfo *) NULL)
    {
      edge_image=DestroyImage(edge_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  edge_view=AcquireVirtualCacheView(edge_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(edge_image,edge_image,edge_image->rows,1)
#endif
  for (y=0; y < (ssize_t) edge_image->rows; y++)
  {
    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      Fetch columns+1 x 2 pixels so the 2x2 gradient window can read one
      pixel to the right and one row below; out-of-bounds pixels come from
      the virtual-pixel machinery of the cache view.
    */
    p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns+1,2,
      exception);
    if (p == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) edge_image->columns; x++)
    {
      CannyInfo
        pixel;

      double
        dx,
        dy;

      register const PixelPacket
        *restrict kernel_pixels;

      ssize_t
        v;

      /*
        2x2 finite-difference kernels for the horizontal (Gx) and vertical
        (Gy) intensity gradients.
      */
      static double
        Gx[2][2] =
        {
          { -1.0, +1.0 },
          { -1.0, +1.0 }
        },
        Gy[2][2] =
        {
          { +1.0, +1.0 },
          { -1.0, -1.0 }
        };

      (void) ResetMagickMemory(&pixel,0,sizeof(pixel));
      dx=0.0;
      dy=0.0;
      kernel_pixels=p;
      for (v=0; v < 2; v++)
      {
        ssize_t
          u;

        for (u=0; u < 2; u++)
        {
          double
            intensity;

          intensity=GetPixelIntensity(edge_image,kernel_pixels+u);
          dx+=0.5*Gx[v][u]*intensity;
          dy+=0.5*Gy[v][u]*intensity;
        }
        kernel_pixels+=edge_image->columns+1;
      }
      pixel.magnitude=hypot(dx,dy);
      /*
        Quantize the gradient direction into one of 4 bins; the bin selects
        which neighbor pair is compared during non-maxima suppression
        (0: north/south, 1: NW/SE, 2: east/west, 3: NE/SW).  Thresholds
        0.414... and 2.414... are tan(22.5) and tan(67.5) degrees.
      */
      pixel.orientation=0;
      if (fabs(dx) > MagickEpsilon)
        {
          double
            slope;

          slope=dy/dx;
          if (slope < 0.0)
            {
              if (slope < -2.41421356237)
                pixel.orientation=0;
              else
                if (slope < -0.414213562373)
                  pixel.orientation=1;
                else
                  pixel.orientation=2;
            }
          else
            {
              if (slope > 2.41421356237)
                pixel.orientation=0;
              else
                if (slope > 0.414213562373)
                  pixel.orientation=3;
                else
                  pixel.orientation=2;
            }
        }
      if (SetMatrixElement(canny_cache,x,y,&pixel) == MagickFalse)
        continue;
      p++;
    }
  }
  edge_view=DestroyCacheView(edge_view);
  /*
    Non-maxima suppression, remove pixels that are not considered to be part
    of an edge.
  */
  progress=0;
  /*
    Seed min/max from element (0,0); both are refined inside the loop's
    critical section below.
  */
  (void) GetMatrixElement(canny_cache,0,0,&pixel);
  max=pixel.intensity;
  min=pixel.intensity;
  edge_view=AcquireAuthenticCacheView(edge_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(edge_image,edge_image,edge_image->rows,1)
#endif
  for (y=0; y < (ssize_t) edge_image->rows; y++)
  {
    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(edge_view,0,y,edge_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) edge_image->columns; x++)
    {
      CannyInfo
        alpha_pixel,
        beta_pixel,
        pixel;

      (void) GetMatrixElement(canny_cache,x,y,&pixel);
      /*
        Pick the two neighbors along the quantized gradient direction.
      */
      switch (pixel.orientation)
      {
        case 0:
        default:
        {
          /*
            0 degrees, north and south.
          */
          (void) GetMatrixElement(canny_cache,x,y-1,&alpha_pixel);
          (void) GetMatrixElement(canny_cache,x,y+1,&beta_pixel);
          break;
        }
        case 1:
        {
          /*
            45 degrees, northwest and southeast.
          */
          (void) GetMatrixElement(canny_cache,x-1,y-1,&alpha_pixel);
          (void) GetMatrixElement(canny_cache,x+1,y+1,&beta_pixel);
          break;
        }
        case 2:
        {
          /*
            90 degrees, east and west.
          */
          (void) GetMatrixElement(canny_cache,x-1,y,&alpha_pixel);
          (void) GetMatrixElement(canny_cache,x+1,y,&beta_pixel);
          break;
        }
        case 3:
        {
          /*
            135 degrees, northeast and southwest.
          */
          (void) GetMatrixElement(canny_cache,x+1,y-1,&beta_pixel);
          (void) GetMatrixElement(canny_cache,x-1,y+1,&alpha_pixel);
          break;
        }
      }
      /*
        Keep the magnitude only if it is a local maximum along the gradient;
        otherwise suppress it to zero.
      */
      pixel.intensity=pixel.magnitude;
      if ((pixel.magnitude < alpha_pixel.magnitude) ||
          (pixel.magnitude < beta_pixel.magnitude))
        pixel.intensity=0;
      (void) SetMatrixElement(canny_cache,x,y,&pixel);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp critical (MagickCore_CannyEdgeImage)
#endif
      {
        if (pixel.intensity < min)
          min=pixel.intensity;
        if (pixel.intensity > max)
          max=pixel.intensity;
      }
      /*
        Reset the output raster to black; TraceEdges() paints edges white.
      */
      q->red=0;
      q->green=0;
      q->blue=0;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(edge_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_CannyEdgeImage)
#endif
        proceed=SetImageProgress(image,CannyEdgeImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  edge_view=DestroyCacheView(edge_view);
  /*
    Estimate hysteresis threshold.
  */
  lower_threshold=lower_percent*(max-min)+min;
  upper_threshold=upper_percent*(max-min)+min;
  /*
    Hysteresis threshold.
  */
  edge_view=AcquireAuthenticCacheView(edge_image,exception);
  for (y=0; y < (ssize_t) edge_image->rows; y++)
  {
    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    for (x=0; x < (ssize_t) edge_image->columns; x++)
    {
      CannyInfo
        pixel;

      register const PixelPacket
        *restrict p;

      /*
        Edge if pixel gradient higher than upper threshold.
      */
      p=GetCacheViewVirtualPixels(edge_view,x,y,1,1,exception);
      if (p == (const PixelPacket *) NULL)
        continue;
      status=GetMatrixElement(canny_cache,x,y,&pixel);
      if (status == MagickFalse)
        continue;
      if ((GetPixelIntensity(edge_image,p) == 0.0) &&
          (pixel.intensity >= upper_threshold))
        status=TraceEdges(edge_image,edge_view,canny_cache,x,y,lower_threshold,
          exception);
    }
  }
  edge_view=DestroyCacheView(edge_view);
  /*
    Free resources.
  */
  canny_cache=DestroyMatrixInfo(canny_cache);
  return(edge_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l F e a t u r e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelFeatures() returns features for each channel in the image in
% each of four directions (horizontal, vertical, left and right diagonals)
% for the specified distance. The features include the angular second
% moment, contrast, correlation, sum of squares: variance, inverse difference
% moment, sum average, sum variance, sum entropy, entropy, difference variance,
% difference entropy, information measures of correlation 1, information
% measures of correlation 2, and maximum correlation coefficient. You can
% access the red channel contrast, for example, like this:
%
% channel_features=GetImageChannelFeatures(image,1,exception);
% contrast=channel_features[RedChannel].contrast[0];
%
% Use MagickRelinquishMemory() to free the features buffer.
%
% The format of the GetImageChannelFeatures method is:
%
% ChannelFeatures *GetImageChannelFeatures(const Image *image,
% const size_t distance,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o distance: the distance.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  MagickAbsoluteValue() returns the absolute value of its argument.
*/
static inline ssize_t MagickAbsoluteValue(const ssize_t x)
{
  return(x >= 0 ? x : -x);
}
/*
  MagickLog10() returns log10(fabs(x)), clamping the magnitude to
  Log10Epsilon so that a zero (or vanishingly small) argument never reaches
  log10() and yields -infinity.
*/
static inline double MagickLog10(const double x)
{
#define Log10Epsilon (1.0e-11)

  double
    magnitude;

  magnitude=fabs(x);
  if (magnitude < Log10Epsilon)
    magnitude=Log10Epsilon;
  return(log10(magnitude));
}
MagickExport ChannelFeatures *GetImageChannelFeatures(const Image *image,
const size_t distance,ExceptionInfo *exception)
{
typedef struct _ChannelStatistics
{
DoublePixelPacket
direction[4]; /* horizontal, vertical, left and right diagonals */
} ChannelStatistics;
CacheView
*image_view;
ChannelFeatures
*channel_features;
ChannelStatistics
**cooccurrence,
correlation,
*density_x,
*density_xy,
*density_y,
entropy_x,
entropy_xy,
entropy_xy1,
entropy_xy2,
entropy_y,
mean,
**Q,
*sum,
sum_squares,
variance;
LongPixelPacket
gray,
*grays;
MagickBooleanType
status;
register ssize_t
i;
size_t
length;
ssize_t
y;
unsigned int
number_grays;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if ((image->columns < (distance+1)) || (image->rows < (distance+1)))
return((ChannelFeatures *) NULL);
length=CompositeChannels+1UL;
channel_features=(ChannelFeatures *) AcquireQuantumMemory(length,
sizeof(*channel_features));
if (channel_features == (ChannelFeatures *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) ResetMagickMemory(channel_features,0,length*
sizeof(*channel_features));
/*
Form grays.
*/
grays=(LongPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*grays));
if (grays == (LongPixelPacket *) NULL)
{
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(channel_features);
}
for (i=0; i <= (ssize_t) MaxMap; i++)
{
grays[i].red=(~0U);
grays[i].green=(~0U);
grays[i].blue=(~0U);
grays[i].opacity=(~0U);
grays[i].index=(~0U);
}
status=MagickTrue;
image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register const IndexPacket
*restrict indexes;
register const PixelPacket
*restrict p;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
grays[ScaleQuantumToMap(GetPixelRed(p))].red=
ScaleQuantumToMap(GetPixelRed(p));
grays[ScaleQuantumToMap(GetPixelGreen(p))].green=
ScaleQuantumToMap(GetPixelGreen(p));
grays[ScaleQuantumToMap(GetPixelBlue(p))].blue=
ScaleQuantumToMap(GetPixelBlue(p));
if (image->colorspace == CMYKColorspace)
grays[ScaleQuantumToMap(GetPixelIndex(indexes+x))].index=
ScaleQuantumToMap(GetPixelIndex(indexes+x));
if (image->matte != MagickFalse)
grays[ScaleQuantumToMap(GetPixelOpacity(p))].opacity=
ScaleQuantumToMap(GetPixelOpacity(p));
p++;
}
}
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
{
grays=(LongPixelPacket *) RelinquishMagickMemory(grays);
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
return(channel_features);
}
(void) ResetMagickMemory(&gray,0,sizeof(gray));
for (i=0; i <= (ssize_t) MaxMap; i++)
{
if (grays[i].red != ~0U)
grays[(ssize_t) gray.red++].red=grays[i].red;
if (grays[i].green != ~0U)
grays[(ssize_t) gray.green++].green=grays[i].green;
if (grays[i].blue != ~0U)
grays[(ssize_t) gray.blue++].blue=grays[i].blue;
if (image->colorspace == CMYKColorspace)
if (grays[i].index != ~0U)
grays[(ssize_t) gray.index++].index=grays[i].index;
if (image->matte != MagickFalse)
if (grays[i].opacity != ~0U)
grays[(ssize_t) gray.opacity++].opacity=grays[i].opacity;
}
/*
Allocate spatial dependence matrix.
*/
number_grays=gray.red;
if (gray.green > number_grays)
number_grays=gray.green;
if (gray.blue > number_grays)
number_grays=gray.blue;
if (image->colorspace == CMYKColorspace)
if (gray.index > number_grays)
number_grays=gray.index;
if (image->matte != MagickFalse)
if (gray.opacity > number_grays)
number_grays=gray.opacity;
cooccurrence=(ChannelStatistics **) AcquireQuantumMemory(number_grays,
sizeof(*cooccurrence));
density_x=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1),
sizeof(*density_x));
density_xy=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1),
sizeof(*density_xy));
density_y=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1),
sizeof(*density_y));
Q=(ChannelStatistics **) AcquireQuantumMemory(number_grays,sizeof(*Q));
sum=(ChannelStatistics *) AcquireQuantumMemory(number_grays,sizeof(*sum));
if ((cooccurrence == (ChannelStatistics **) NULL) ||
(density_x == (ChannelStatistics *) NULL) ||
(density_xy == (ChannelStatistics *) NULL) ||
(density_y == (ChannelStatistics *) NULL) ||
(Q == (ChannelStatistics **) NULL) ||
(sum == (ChannelStatistics *) NULL))
{
if (Q != (ChannelStatistics **) NULL)
{
for (i=0; i < (ssize_t) number_grays; i++)
Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]);
Q=(ChannelStatistics **) RelinquishMagickMemory(Q);
}
if (sum != (ChannelStatistics *) NULL)
sum=(ChannelStatistics *) RelinquishMagickMemory(sum);
if (density_y != (ChannelStatistics *) NULL)
density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y);
if (density_xy != (ChannelStatistics *) NULL)
density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy);
if (density_x != (ChannelStatistics *) NULL)
density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x);
if (cooccurrence != (ChannelStatistics **) NULL)
{
for (i=0; i < (ssize_t) number_grays; i++)
cooccurrence[i]=(ChannelStatistics *)
RelinquishMagickMemory(cooccurrence[i]);
cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(
cooccurrence);
}
grays=(LongPixelPacket *) RelinquishMagickMemory(grays);
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(channel_features);
}
(void) ResetMagickMemory(&correlation,0,sizeof(correlation));
(void) ResetMagickMemory(density_x,0,2*(number_grays+1)*sizeof(*density_x));
(void) ResetMagickMemory(density_xy,0,2*(number_grays+1)*sizeof(*density_xy));
(void) ResetMagickMemory(density_y,0,2*(number_grays+1)*sizeof(*density_y));
(void) ResetMagickMemory(&mean,0,sizeof(mean));
(void) ResetMagickMemory(sum,0,number_grays*sizeof(*sum));
(void) ResetMagickMemory(&sum_squares,0,sizeof(sum_squares));
(void) ResetMagickMemory(density_xy,0,2*number_grays*sizeof(*density_xy));
(void) ResetMagickMemory(&entropy_x,0,sizeof(entropy_x));
(void) ResetMagickMemory(&entropy_xy,0,sizeof(entropy_xy));
(void) ResetMagickMemory(&entropy_xy1,0,sizeof(entropy_xy1));
(void) ResetMagickMemory(&entropy_xy2,0,sizeof(entropy_xy2));
(void) ResetMagickMemory(&entropy_y,0,sizeof(entropy_y));
(void) ResetMagickMemory(&variance,0,sizeof(variance));
for (i=0; i < (ssize_t) number_grays; i++)
{
cooccurrence[i]=(ChannelStatistics *) AcquireQuantumMemory(number_grays,
sizeof(**cooccurrence));
Q[i]=(ChannelStatistics *) AcquireQuantumMemory(number_grays,sizeof(**Q));
if ((cooccurrence[i] == (ChannelStatistics *) NULL) ||
(Q[i] == (ChannelStatistics *) NULL))
break;
(void) ResetMagickMemory(cooccurrence[i],0,number_grays*
sizeof(**cooccurrence));
(void) ResetMagickMemory(Q[i],0,number_grays*sizeof(**Q));
}
if (i < (ssize_t) number_grays)
{
for (i--; i >= 0; i--)
{
if (Q[i] != (ChannelStatistics *) NULL)
Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]);
if (cooccurrence[i] != (ChannelStatistics *) NULL)
cooccurrence[i]=(ChannelStatistics *)
RelinquishMagickMemory(cooccurrence[i]);
}
Q=(ChannelStatistics **) RelinquishMagickMemory(Q);
cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence);
sum=(ChannelStatistics *) RelinquishMagickMemory(sum);
density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y);
density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy);
density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x);
grays=(LongPixelPacket *) RelinquishMagickMemory(grays);
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(channel_features);
}
/*
Initialize spatial dependence matrix.
*/
status=MagickTrue;
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const IndexPacket
*restrict indexes;
register const PixelPacket
*restrict p;
register ssize_t
x;
ssize_t
i,
offset,
u,
v;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,-(ssize_t) distance,y,image->columns+
2*distance,distance+2,exception);
if (p == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
p+=distance;
indexes+=distance;
for (x=0; x < (ssize_t) image->columns; x++)
{
for (i=0; i < 4; i++)
{
switch (i)
{
case 0:
default:
{
/*
Horizontal adjacency.
*/
offset=(ssize_t) distance;
break;
}
case 1:
{
/*
Vertical adjacency.
*/
offset=(ssize_t) (image->columns+2*distance);
break;
}
case 2:
{
/*
Right diagonal adjacency.
*/
offset=(ssize_t) ((image->columns+2*distance)-distance);
break;
}
case 3:
{
/*
Left diagonal adjacency.
*/
offset=(ssize_t) ((image->columns+2*distance)+distance);
break;
}
}
u=0;
v=0;
while (grays[u].red != ScaleQuantumToMap(GetPixelRed(p)))
u++;
while (grays[v].red != ScaleQuantumToMap(GetPixelRed(p+offset)))
v++;
cooccurrence[u][v].direction[i].red++;
cooccurrence[v][u].direction[i].red++;
u=0;
v=0;
while (grays[u].green != ScaleQuantumToMap(GetPixelGreen(p)))
u++;
while (grays[v].green != ScaleQuantumToMap(GetPixelGreen(p+offset)))
v++;
cooccurrence[u][v].direction[i].green++;
cooccurrence[v][u].direction[i].green++;
u=0;
v=0;
while (grays[u].blue != ScaleQuantumToMap(GetPixelBlue(p)))
u++;
while (grays[v].blue != ScaleQuantumToMap((p+offset)->blue))
v++;
cooccurrence[u][v].direction[i].blue++;
cooccurrence[v][u].direction[i].blue++;
if (image->colorspace == CMYKColorspace)
{
u=0;
v=0;
while (grays[u].index != ScaleQuantumToMap(GetPixelIndex(indexes+x)))
u++;
while (grays[v].index != ScaleQuantumToMap(GetPixelIndex(indexes+x+offset)))
v++;
cooccurrence[u][v].direction[i].index++;
cooccurrence[v][u].direction[i].index++;
}
if (image->matte != MagickFalse)
{
u=0;
v=0;
while (grays[u].opacity != ScaleQuantumToMap(GetPixelOpacity(p)))
u++;
while (grays[v].opacity != ScaleQuantumToMap((p+offset)->opacity))
v++;
cooccurrence[u][v].direction[i].opacity++;
cooccurrence[v][u].direction[i].opacity++;
}
}
p++;
}
}
grays=(LongPixelPacket *) RelinquishMagickMemory(grays);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
{
for (i=0; i < (ssize_t) number_grays; i++)
cooccurrence[i]=(ChannelStatistics *)
RelinquishMagickMemory(cooccurrence[i]);
cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence);
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(channel_features);
}
/*
Normalize spatial dependence matrix.
*/
for (i=0; i < 4; i++)
{
double
normalize;
register ssize_t
y;
switch (i)
{
case 0:
default:
{
/*
Horizontal adjacency.
*/
normalize=2.0*image->rows*(image->columns-distance);
break;
}
case 1:
{
/*
Vertical adjacency.
*/
normalize=2.0*(image->rows-distance)*image->columns;
break;
}
case 2:
{
/*
Right diagonal adjacency.
*/
normalize=2.0*(image->rows-distance)*(image->columns-distance);
break;
}
case 3:
{
/*
Left diagonal adjacency.
*/
normalize=2.0*(image->rows-distance)*(image->columns-distance);
break;
}
}
normalize=PerceptibleReciprocal(normalize);
for (y=0; y < (ssize_t) number_grays; y++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
cooccurrence[x][y].direction[i].red*=normalize;
cooccurrence[x][y].direction[i].green*=normalize;
cooccurrence[x][y].direction[i].blue*=normalize;
if (image->colorspace == CMYKColorspace)
cooccurrence[x][y].direction[i].index*=normalize;
if (image->matte != MagickFalse)
cooccurrence[x][y].direction[i].opacity*=normalize;
}
}
}
/*
Compute texture features.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,number_grays,1)
#endif
for (i=0; i < 4; i++)
{
register ssize_t
y;
for (y=0; y < (ssize_t) number_grays; y++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
/*
Angular second moment: measure of homogeneity of the image.
*/
channel_features[RedChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].red*
cooccurrence[x][y].direction[i].red;
channel_features[GreenChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].green*
cooccurrence[x][y].direction[i].green;
channel_features[BlueChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].blue*
cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[BlackChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].index*
cooccurrence[x][y].direction[i].index;
if (image->matte != MagickFalse)
channel_features[OpacityChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].opacity*
cooccurrence[x][y].direction[i].opacity;
/*
Correlation: measure of linear-dependencies in the image.
*/
sum[y].direction[i].red+=cooccurrence[x][y].direction[i].red;
sum[y].direction[i].green+=cooccurrence[x][y].direction[i].green;
sum[y].direction[i].blue+=cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
sum[y].direction[i].index+=cooccurrence[x][y].direction[i].index;
if (image->matte != MagickFalse)
sum[y].direction[i].opacity+=cooccurrence[x][y].direction[i].opacity;
correlation.direction[i].red+=x*y*cooccurrence[x][y].direction[i].red;
correlation.direction[i].green+=x*y*
cooccurrence[x][y].direction[i].green;
correlation.direction[i].blue+=x*y*
cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
correlation.direction[i].index+=x*y*
cooccurrence[x][y].direction[i].index;
if (image->matte != MagickFalse)
correlation.direction[i].opacity+=x*y*
cooccurrence[x][y].direction[i].opacity;
/*
Inverse Difference Moment.
*/
channel_features[RedChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].red/((y-x)*(y-x)+1);
channel_features[GreenChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].green/((y-x)*(y-x)+1);
channel_features[BlueChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].blue/((y-x)*(y-x)+1);
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].index/((y-x)*(y-x)+1);
if (image->matte != MagickFalse)
channel_features[OpacityChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].opacity/((y-x)*(y-x)+1);
/*
Sum average.
*/
density_xy[y+x+2].direction[i].red+=
cooccurrence[x][y].direction[i].red;
density_xy[y+x+2].direction[i].green+=
cooccurrence[x][y].direction[i].green;
density_xy[y+x+2].direction[i].blue+=
cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
density_xy[y+x+2].direction[i].index+=
cooccurrence[x][y].direction[i].index;
if (image->matte != MagickFalse)
density_xy[y+x+2].direction[i].opacity+=
cooccurrence[x][y].direction[i].opacity;
/*
Entropy.
*/
channel_features[RedChannel].entropy[i]-=
cooccurrence[x][y].direction[i].red*
MagickLog10(cooccurrence[x][y].direction[i].red);
channel_features[GreenChannel].entropy[i]-=
cooccurrence[x][y].direction[i].green*
MagickLog10(cooccurrence[x][y].direction[i].green);
channel_features[BlueChannel].entropy[i]-=
cooccurrence[x][y].direction[i].blue*
MagickLog10(cooccurrence[x][y].direction[i].blue);
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].entropy[i]-=
cooccurrence[x][y].direction[i].index*
MagickLog10(cooccurrence[x][y].direction[i].index);
if (image->matte != MagickFalse)
channel_features[OpacityChannel].entropy[i]-=
cooccurrence[x][y].direction[i].opacity*
MagickLog10(cooccurrence[x][y].direction[i].opacity);
/*
Information Measures of Correlation.
*/
density_x[x].direction[i].red+=cooccurrence[x][y].direction[i].red;
density_x[x].direction[i].green+=cooccurrence[x][y].direction[i].green;
density_x[x].direction[i].blue+=cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
density_x[x].direction[i].index+=
cooccurrence[x][y].direction[i].index;
if (image->matte != MagickFalse)
density_x[x].direction[i].opacity+=
cooccurrence[x][y].direction[i].opacity;
density_y[y].direction[i].red+=cooccurrence[x][y].direction[i].red;
density_y[y].direction[i].green+=cooccurrence[x][y].direction[i].green;
density_y[y].direction[i].blue+=cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
density_y[y].direction[i].index+=
cooccurrence[x][y].direction[i].index;
if (image->matte != MagickFalse)
density_y[y].direction[i].opacity+=
cooccurrence[x][y].direction[i].opacity;
}
mean.direction[i].red+=y*sum[y].direction[i].red;
sum_squares.direction[i].red+=y*y*sum[y].direction[i].red;
mean.direction[i].green+=y*sum[y].direction[i].green;
sum_squares.direction[i].green+=y*y*sum[y].direction[i].green;
mean.direction[i].blue+=y*sum[y].direction[i].blue;
sum_squares.direction[i].blue+=y*y*sum[y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
{
mean.direction[i].index+=y*sum[y].direction[i].index;
sum_squares.direction[i].index+=y*y*sum[y].direction[i].index;
}
if (image->matte != MagickFalse)
{
mean.direction[i].opacity+=y*sum[y].direction[i].opacity;
sum_squares.direction[i].opacity+=y*y*sum[y].direction[i].opacity;
}
}
/*
Correlation: measure of linear-dependencies in the image.
*/
channel_features[RedChannel].correlation[i]=
(correlation.direction[i].red-mean.direction[i].red*
mean.direction[i].red)/(sqrt(sum_squares.direction[i].red-
(mean.direction[i].red*mean.direction[i].red))*sqrt(
sum_squares.direction[i].red-(mean.direction[i].red*
mean.direction[i].red)));
channel_features[GreenChannel].correlation[i]=
(correlation.direction[i].green-mean.direction[i].green*
mean.direction[i].green)/(sqrt(sum_squares.direction[i].green-
(mean.direction[i].green*mean.direction[i].green))*sqrt(
sum_squares.direction[i].green-(mean.direction[i].green*
mean.direction[i].green)));
channel_features[BlueChannel].correlation[i]=
(correlation.direction[i].blue-mean.direction[i].blue*
mean.direction[i].blue)/(sqrt(sum_squares.direction[i].blue-
(mean.direction[i].blue*mean.direction[i].blue))*sqrt(
sum_squares.direction[i].blue-(mean.direction[i].blue*
mean.direction[i].blue)));
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].correlation[i]=
(correlation.direction[i].index-mean.direction[i].index*
mean.direction[i].index)/(sqrt(sum_squares.direction[i].index-
(mean.direction[i].index*mean.direction[i].index))*sqrt(
sum_squares.direction[i].index-(mean.direction[i].index*
mean.direction[i].index)));
if (image->matte != MagickFalse)
channel_features[OpacityChannel].correlation[i]=
(correlation.direction[i].opacity-mean.direction[i].opacity*
mean.direction[i].opacity)/(sqrt(sum_squares.direction[i].opacity-
(mean.direction[i].opacity*mean.direction[i].opacity))*sqrt(
sum_squares.direction[i].opacity-(mean.direction[i].opacity*
mean.direction[i].opacity)));
}
/*
Compute more texture features.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,number_grays,1)
#endif
for (i=0; i < 4; i++)
{
register ssize_t
x;
for (x=2; x < (ssize_t) (2*number_grays); x++)
{
/*
Sum average.
*/
channel_features[RedChannel].sum_average[i]+=
x*density_xy[x].direction[i].red;
channel_features[GreenChannel].sum_average[i]+=
x*density_xy[x].direction[i].green;
channel_features[BlueChannel].sum_average[i]+=
x*density_xy[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].sum_average[i]+=
x*density_xy[x].direction[i].index;
if (image->matte != MagickFalse)
channel_features[OpacityChannel].sum_average[i]+=
x*density_xy[x].direction[i].opacity;
/*
Sum entropy.
*/
channel_features[RedChannel].sum_entropy[i]-=
density_xy[x].direction[i].red*
MagickLog10(density_xy[x].direction[i].red);
channel_features[GreenChannel].sum_entropy[i]-=
density_xy[x].direction[i].green*
MagickLog10(density_xy[x].direction[i].green);
channel_features[BlueChannel].sum_entropy[i]-=
density_xy[x].direction[i].blue*
MagickLog10(density_xy[x].direction[i].blue);
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].sum_entropy[i]-=
density_xy[x].direction[i].index*
MagickLog10(density_xy[x].direction[i].index);
if (image->matte != MagickFalse)
channel_features[OpacityChannel].sum_entropy[i]-=
density_xy[x].direction[i].opacity*
MagickLog10(density_xy[x].direction[i].opacity);
/*
Sum variance.
*/
channel_features[RedChannel].sum_variance[i]+=
(x-channel_features[RedChannel].sum_entropy[i])*
(x-channel_features[RedChannel].sum_entropy[i])*
density_xy[x].direction[i].red;
channel_features[GreenChannel].sum_variance[i]+=
(x-channel_features[GreenChannel].sum_entropy[i])*
(x-channel_features[GreenChannel].sum_entropy[i])*
density_xy[x].direction[i].green;
channel_features[BlueChannel].sum_variance[i]+=
(x-channel_features[BlueChannel].sum_entropy[i])*
(x-channel_features[BlueChannel].sum_entropy[i])*
density_xy[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].sum_variance[i]+=
(x-channel_features[IndexChannel].sum_entropy[i])*
(x-channel_features[IndexChannel].sum_entropy[i])*
density_xy[x].direction[i].index;
if (image->matte != MagickFalse)
channel_features[OpacityChannel].sum_variance[i]+=
(x-channel_features[OpacityChannel].sum_entropy[i])*
(x-channel_features[OpacityChannel].sum_entropy[i])*
density_xy[x].direction[i].opacity;
}
}
/*
Compute more texture features.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,number_grays,1)
#endif
for (i=0; i < 4; i++)
{
register ssize_t
y;
for (y=0; y < (ssize_t) number_grays; y++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
/*
Sum of Squares: Variance
*/
variance.direction[i].red+=(y-mean.direction[i].red+1)*
(y-mean.direction[i].red+1)*cooccurrence[x][y].direction[i].red;
variance.direction[i].green+=(y-mean.direction[i].green+1)*
(y-mean.direction[i].green+1)*cooccurrence[x][y].direction[i].green;
variance.direction[i].blue+=(y-mean.direction[i].blue+1)*
(y-mean.direction[i].blue+1)*cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
variance.direction[i].index+=(y-mean.direction[i].index+1)*
(y-mean.direction[i].index+1)*cooccurrence[x][y].direction[i].index;
if (image->matte != MagickFalse)
variance.direction[i].opacity+=(y-mean.direction[i].opacity+1)*
(y-mean.direction[i].opacity+1)*
cooccurrence[x][y].direction[i].opacity;
/*
Sum average / Difference Variance.
*/
density_xy[MagickAbsoluteValue(y-x)].direction[i].red+=
cooccurrence[x][y].direction[i].red;
density_xy[MagickAbsoluteValue(y-x)].direction[i].green+=
cooccurrence[x][y].direction[i].green;
density_xy[MagickAbsoluteValue(y-x)].direction[i].blue+=
cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
density_xy[MagickAbsoluteValue(y-x)].direction[i].index+=
cooccurrence[x][y].direction[i].index;
if (image->matte != MagickFalse)
density_xy[MagickAbsoluteValue(y-x)].direction[i].opacity+=
cooccurrence[x][y].direction[i].opacity;
/*
Information Measures of Correlation.
*/
entropy_xy.direction[i].red-=cooccurrence[x][y].direction[i].red*
MagickLog10(cooccurrence[x][y].direction[i].red);
entropy_xy.direction[i].green-=cooccurrence[x][y].direction[i].green*
MagickLog10(cooccurrence[x][y].direction[i].green);
entropy_xy.direction[i].blue-=cooccurrence[x][y].direction[i].blue*
MagickLog10(cooccurrence[x][y].direction[i].blue);
if (image->colorspace == CMYKColorspace)
entropy_xy.direction[i].index-=cooccurrence[x][y].direction[i].index*
MagickLog10(cooccurrence[x][y].direction[i].index);
if (image->matte != MagickFalse)
entropy_xy.direction[i].opacity-=
cooccurrence[x][y].direction[i].opacity*MagickLog10(
cooccurrence[x][y].direction[i].opacity);
entropy_xy1.direction[i].red-=(cooccurrence[x][y].direction[i].red*
MagickLog10(density_x[x].direction[i].red*
density_y[y].direction[i].red));
entropy_xy1.direction[i].green-=(cooccurrence[x][y].direction[i].green*
MagickLog10(density_x[x].direction[i].green*
density_y[y].direction[i].green));
entropy_xy1.direction[i].blue-=(cooccurrence[x][y].direction[i].blue*
MagickLog10(density_x[x].direction[i].blue*
density_y[y].direction[i].blue));
if (image->colorspace == CMYKColorspace)
entropy_xy1.direction[i].index-=(
cooccurrence[x][y].direction[i].index*MagickLog10(
density_x[x].direction[i].index*density_y[y].direction[i].index));
if (image->matte != MagickFalse)
entropy_xy1.direction[i].opacity-=(
cooccurrence[x][y].direction[i].opacity*MagickLog10(
density_x[x].direction[i].opacity*
density_y[y].direction[i].opacity));
entropy_xy2.direction[i].red-=(density_x[x].direction[i].red*
density_y[y].direction[i].red*MagickLog10(
density_x[x].direction[i].red*density_y[y].direction[i].red));
entropy_xy2.direction[i].green-=(density_x[x].direction[i].green*
density_y[y].direction[i].green*MagickLog10(
density_x[x].direction[i].green*density_y[y].direction[i].green));
entropy_xy2.direction[i].blue-=(density_x[x].direction[i].blue*
density_y[y].direction[i].blue*MagickLog10(
density_x[x].direction[i].blue*density_y[y].direction[i].blue));
if (image->colorspace == CMYKColorspace)
entropy_xy2.direction[i].index-=(density_x[x].direction[i].index*
density_y[y].direction[i].index*MagickLog10(
density_x[x].direction[i].index*density_y[y].direction[i].index));
if (image->matte != MagickFalse)
entropy_xy2.direction[i].opacity-=(density_x[x].direction[i].opacity*
density_y[y].direction[i].opacity*MagickLog10(
density_x[x].direction[i].opacity*
density_y[y].direction[i].opacity));
}
}
channel_features[RedChannel].variance_sum_of_squares[i]=
variance.direction[i].red;
channel_features[GreenChannel].variance_sum_of_squares[i]=
variance.direction[i].green;
channel_features[BlueChannel].variance_sum_of_squares[i]=
variance.direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[RedChannel].variance_sum_of_squares[i]=
variance.direction[i].index;
if (image->matte != MagickFalse)
channel_features[RedChannel].variance_sum_of_squares[i]=
variance.direction[i].opacity;
}
/*
Compute more texture features.
*/
(void) ResetMagickMemory(&variance,0,sizeof(variance));
(void) ResetMagickMemory(&sum_squares,0,sizeof(sum_squares));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,number_grays,1)
#endif
for (i=0; i < 4; i++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
/*
Difference variance.
*/
variance.direction[i].red+=density_xy[x].direction[i].red;
variance.direction[i].green+=density_xy[x].direction[i].green;
variance.direction[i].blue+=density_xy[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
variance.direction[i].index+=density_xy[x].direction[i].index;
if (image->matte != MagickFalse)
variance.direction[i].opacity+=density_xy[x].direction[i].opacity;
sum_squares.direction[i].red+=density_xy[x].direction[i].red*
density_xy[x].direction[i].red;
sum_squares.direction[i].green+=density_xy[x].direction[i].green*
density_xy[x].direction[i].green;
sum_squares.direction[i].blue+=density_xy[x].direction[i].blue*
density_xy[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
sum_squares.direction[i].index+=density_xy[x].direction[i].index*
density_xy[x].direction[i].index;
if (image->matte != MagickFalse)
sum_squares.direction[i].opacity+=density_xy[x].direction[i].opacity*
density_xy[x].direction[i].opacity;
/*
Difference entropy.
*/
channel_features[RedChannel].difference_entropy[i]-=
density_xy[x].direction[i].red*
MagickLog10(density_xy[x].direction[i].red);
channel_features[GreenChannel].difference_entropy[i]-=
density_xy[x].direction[i].green*
MagickLog10(density_xy[x].direction[i].green);
channel_features[BlueChannel].difference_entropy[i]-=
density_xy[x].direction[i].blue*
MagickLog10(density_xy[x].direction[i].blue);
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].difference_entropy[i]-=
density_xy[x].direction[i].index*
MagickLog10(density_xy[x].direction[i].index);
if (image->matte != MagickFalse)
channel_features[OpacityChannel].difference_entropy[i]-=
density_xy[x].direction[i].opacity*
MagickLog10(density_xy[x].direction[i].opacity);
/*
Information Measures of Correlation.
*/
entropy_x.direction[i].red-=(density_x[x].direction[i].red*
MagickLog10(density_x[x].direction[i].red));
entropy_x.direction[i].green-=(density_x[x].direction[i].green*
MagickLog10(density_x[x].direction[i].green));
entropy_x.direction[i].blue-=(density_x[x].direction[i].blue*
MagickLog10(density_x[x].direction[i].blue));
if (image->colorspace == CMYKColorspace)
entropy_x.direction[i].index-=(density_x[x].direction[i].index*
MagickLog10(density_x[x].direction[i].index));
if (image->matte != MagickFalse)
entropy_x.direction[i].opacity-=(density_x[x].direction[i].opacity*
MagickLog10(density_x[x].direction[i].opacity));
entropy_y.direction[i].red-=(density_y[x].direction[i].red*
MagickLog10(density_y[x].direction[i].red));
entropy_y.direction[i].green-=(density_y[x].direction[i].green*
MagickLog10(density_y[x].direction[i].green));
entropy_y.direction[i].blue-=(density_y[x].direction[i].blue*
MagickLog10(density_y[x].direction[i].blue));
if (image->colorspace == CMYKColorspace)
entropy_y.direction[i].index-=(density_y[x].direction[i].index*
MagickLog10(density_y[x].direction[i].index));
if (image->matte != MagickFalse)
entropy_y.direction[i].opacity-=(density_y[x].direction[i].opacity*
MagickLog10(density_y[x].direction[i].opacity));
}
/*
Difference variance.
*/
channel_features[RedChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].red)-
(variance.direction[i].red*variance.direction[i].red))/
((double) number_grays*number_grays*number_grays*number_grays);
channel_features[GreenChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].green)-
(variance.direction[i].green*variance.direction[i].green))/
((double) number_grays*number_grays*number_grays*number_grays);
channel_features[BlueChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].blue)-
(variance.direction[i].blue*variance.direction[i].blue))/
((double) number_grays*number_grays*number_grays*number_grays);
if (image->matte != MagickFalse)
channel_features[OpacityChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].opacity)-
(variance.direction[i].opacity*variance.direction[i].opacity))/
((double) number_grays*number_grays*number_grays*number_grays);
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].index)-
(variance.direction[i].index*variance.direction[i].index))/
((double) number_grays*number_grays*number_grays*number_grays);
/*
Information Measures of Correlation.
*/
channel_features[RedChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].red-entropy_xy1.direction[i].red)/
(entropy_x.direction[i].red > entropy_y.direction[i].red ?
entropy_x.direction[i].red : entropy_y.direction[i].red);
channel_features[GreenChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].green-entropy_xy1.direction[i].green)/
(entropy_x.direction[i].green > entropy_y.direction[i].green ?
entropy_x.direction[i].green : entropy_y.direction[i].green);
channel_features[BlueChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].blue-entropy_xy1.direction[i].blue)/
(entropy_x.direction[i].blue > entropy_y.direction[i].blue ?
entropy_x.direction[i].blue : entropy_y.direction[i].blue);
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].index-entropy_xy1.direction[i].index)/
(entropy_x.direction[i].index > entropy_y.direction[i].index ?
entropy_x.direction[i].index : entropy_y.direction[i].index);
if (image->matte != MagickFalse)
channel_features[OpacityChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].opacity-entropy_xy1.direction[i].opacity)/
(entropy_x.direction[i].opacity > entropy_y.direction[i].opacity ?
entropy_x.direction[i].opacity : entropy_y.direction[i].opacity);
channel_features[RedChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(entropy_xy2.direction[i].red-
entropy_xy.direction[i].red)))));
channel_features[GreenChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(entropy_xy2.direction[i].green-
entropy_xy.direction[i].green)))));
channel_features[BlueChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(entropy_xy2.direction[i].blue-
entropy_xy.direction[i].blue)))));
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(entropy_xy2.direction[i].index-
entropy_xy.direction[i].index)))));
if (image->matte != MagickFalse)
channel_features[OpacityChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(entropy_xy2.direction[i].opacity-
entropy_xy.direction[i].opacity)))));
}
/*
Compute more texture features.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,number_grays,1)
#endif
for (i=0; i < 4; i++)
{
register ssize_t
z;
for (z=0; z < (ssize_t) number_grays; z++)
{
register ssize_t
y;
ChannelStatistics
pixel;
(void) ResetMagickMemory(&pixel,0,sizeof(pixel));
for (y=0; y < (ssize_t) number_grays; y++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
/*
Contrast: amount of local variations present in an image.
*/
if (((y-x) == z) || ((x-y) == z))
{
pixel.direction[i].red+=cooccurrence[x][y].direction[i].red;
pixel.direction[i].green+=cooccurrence[x][y].direction[i].green;
pixel.direction[i].blue+=cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
pixel.direction[i].index+=cooccurrence[x][y].direction[i].index;
if (image->matte != MagickFalse)
pixel.direction[i].opacity+=
cooccurrence[x][y].direction[i].opacity;
}
/*
Maximum Correlation Coefficient.
*/
Q[z][y].direction[i].red+=cooccurrence[z][x].direction[i].red*
cooccurrence[y][x].direction[i].red/density_x[z].direction[i].red/
density_y[x].direction[i].red;
Q[z][y].direction[i].green+=cooccurrence[z][x].direction[i].green*
cooccurrence[y][x].direction[i].green/
density_x[z].direction[i].green/density_y[x].direction[i].red;
Q[z][y].direction[i].blue+=cooccurrence[z][x].direction[i].blue*
cooccurrence[y][x].direction[i].blue/density_x[z].direction[i].blue/
density_y[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
Q[z][y].direction[i].index+=cooccurrence[z][x].direction[i].index*
cooccurrence[y][x].direction[i].index/
density_x[z].direction[i].index/density_y[x].direction[i].index;
if (image->matte != MagickFalse)
Q[z][y].direction[i].opacity+=
cooccurrence[z][x].direction[i].opacity*
cooccurrence[y][x].direction[i].opacity/
density_x[z].direction[i].opacity/
density_y[x].direction[i].opacity;
}
}
channel_features[RedChannel].contrast[i]+=z*z*pixel.direction[i].red;
channel_features[GreenChannel].contrast[i]+=z*z*pixel.direction[i].green;
channel_features[BlueChannel].contrast[i]+=z*z*pixel.direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[BlackChannel].contrast[i]+=z*z*
pixel.direction[i].index;
if (image->matte != MagickFalse)
channel_features[OpacityChannel].contrast[i]+=z*z*
pixel.direction[i].opacity;
}
/*
Maximum Correlation Coefficient.
Future: return second largest eigenvalue of Q.
*/
channel_features[RedChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
channel_features[GreenChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
channel_features[BlueChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
if (image->matte != MagickFalse)
channel_features[OpacityChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
}
/*
Relinquish resources.
*/
sum=(ChannelStatistics *) RelinquishMagickMemory(sum);
for (i=0; i < (ssize_t) number_grays; i++)
Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]);
Q=(ChannelStatistics **) RelinquishMagickMemory(Q);
density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y);
density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy);
density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x);
for (i=0; i < (ssize_t) number_grays; i++)
cooccurrence[i]=(ChannelStatistics *)
RelinquishMagickMemory(cooccurrence[i]);
cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence);
return(channel_features);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% H o u g h L i n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Use HoughLineImage() in conjunction with any binary edge extracted image (we
% recommend Canny) to identify lines in the image. The algorithm accumulates
% counts for every white pixel for every possible orientation (for angles from
% 0 to 179 in 1 degree increments) and distance from the center of the image to
% the corner (in 1 px increments) and stores the counts in an accumulator matrix
% of angle vs distance. The size of the accumulator is 180x(diagonal/2). Next
% it searches this space for peaks in counts and converts the locations of the
% peaks to slope and intercept in the normal x,y input image space. Use the
% slope/intercepts to find the endpoints clipped to the bounds of the image. The
% lines are then drawn. The counts are a measure of the length of the lines
%
% The format of the HoughLineImage method is:
%
% Image *HoughLineImage(const Image *image,const size_t width,
% const size_t height,const size_t threshold,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width, height: find line pairs as local maxima in this neighborhood.
%
% o threshold: the line count threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Round x to the nearest integer value.  Half-way cases round toward the
  ceiling (i.e. toward +infinity).  The two candidate integers are compared
  directly rather than using floor(x+0.5), which can double-round for values
  such as 0.49999999999999994.
*/
static inline double MagickRound(double x)
{
  const double
    lower = floor(x),
    upper = ceil(x);

  /*
    Return whichever integral bound is closer; ties go to upper.
  */
  return((x-lower) < (upper-x) ? lower : upper);
}
MagickExport Image *HoughLineImage(const Image *image,const size_t width,
  const size_t height,const size_t threshold,ExceptionInfo *exception)
{
#define HoughLineImageTag "HoughLine/Image"

  CacheView
    *image_view;

  char
    message[MaxTextExtent],
    path[MaxTextExtent];  /* unique temporary file holding the MVG script */

  const char
    *artifact;

  double
    hough_height;  /* half the image diagonal: maximum |radius| from center */

  Image
    *lines_image = NULL;

  ImageInfo
    *image_info;

  int
    file;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MatrixInfo
    *accumulator;  /* votes: angle (0..179 degrees) x biased radius */

  PointInfo
    center;

  register ssize_t
    y;

  size_t
    accumulator_height,
    accumulator_width,
    line_count;

  /*
    Create the accumulator: one column per degree of line orientation and
    one row per pixel of signed distance from the image center (biased by
    hough_height so indices are non-negative).
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  accumulator_width=180;
  hough_height=((sqrt(2.0)*(double) (image->rows > image->columns ?
    image->rows : image->columns))/2.0);
  accumulator_height=(size_t) (2.0*hough_height);
  accumulator=AcquireMatrixInfo(accumulator_width,accumulator_height,
    sizeof(double),exception);
  if (accumulator == (MatrixInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  if (NullMatrix(accumulator) == MagickFalse)
    {
      /*
        Zero-fill of the (possibly disk-backed) matrix failed; give up.
      */
      accumulator=DestroyMatrixInfo(accumulator);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Populate the accumulator: every bright pixel (intensity above mid-range,
    i.e. a "white" edge pixel) casts one vote per candidate orientation.
  */
  status=MagickTrue;
  progress=0;
  center.x=(double) image->columns/2.0;
  center.y=(double) image->rows/2.0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelIntensity(image,p) > (QuantumRange/2))
        {
          register ssize_t
            i;

          for (i=0; i < 180; i++)
          {
            double
              count,
              radius;

            /*
              Signed distance from the image center to the line of
              orientation i degrees passing through (x,y).
            */
            radius=(((double) x-center.x)*cos(DegreesToRadians((double) i)))+
              (((double) y-center.y)*sin(DegreesToRadians((double) i)));
            (void) GetMatrixElement(accumulator,i,(ssize_t)
              MagickRound(radius+hough_height),&count);
            count++;
            (void) SetMatrixElement(accumulator,i,(ssize_t)
              MagickRound(radius+hough_height),&count);
          }
        }
      p++;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_HoughLineImage)
#endif
        proceed=SetImageProgress(image,HoughLineImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    {
      accumulator=DestroyMatrixInfo(accumulator);
      return((Image *) NULL);
    }
  /*
    Generate line segments from accumulator: write an MVG drawing script,
    one "line" primitive per local-maximum peak, to a temporary file.
  */
  file=AcquireUniqueFileResource(path);
  if (file == -1)
    {
      accumulator=DestroyMatrixInfo(accumulator);
      return((Image *) NULL);
    }
  (void) FormatLocaleString(message,MaxTextExtent,
    "# Hough line transform: %.20gx%.20g%+.20g\n",(double) width,
    (double) height,(double) threshold);
  if ((size_t)write(file,message,strlen(message)) != strlen(message))
    status=MagickFalse;
  (void) FormatLocaleString(message,MaxTextExtent,"viewbox 0 0 %.20g %.20g\n",
    (double) image->columns,(double) image->rows);
  if ((size_t)write(file,message,strlen(message)) != strlen(message))
    status=MagickFalse;
  /*
    Default peak threshold is a quarter of the larger image dimension (the
    vote count approximates line length); a nonzero threshold overrides it.
  */
  line_count=image->columns > image->rows ? image->columns/4 : image->rows/4;
  if (threshold != 0)
    line_count=threshold;
  for (y=0; y < (ssize_t) accumulator_height; y++)
  {
    register ssize_t
      x;

    for (x=0; x < (ssize_t) accumulator_width; x++)
    {
      double
        count;

      (void) GetMatrixElement(accumulator,x,y,&count);
      if (count >= (double) line_count)
        {
          double
            maxima;

          SegmentInfo
            line;

          ssize_t
            v;

          /*
            Is point a local maxima?  Any strictly larger neighbor in the
            width x height window disqualifies this cell.
            NOTE(review): x+u / y+v can index outside the matrix near its
            edges -- presumably GetMatrixElement rejects out-of-range
            indices; confirm against the matrix API.
          */
          maxima=count;
          for (v=(-((ssize_t) height/2)); v <= (((ssize_t) height/2)); v++)
          {
            ssize_t
              u;

            for (u=(-((ssize_t) width/2)); u <= (((ssize_t) width/2)); u++)
            {
              if ((u != 0) || (v !=0))
                {
                  (void) GetMatrixElement(accumulator,x+u,y+v,&count);
                  if (count > maxima)
                    {
                      maxima=count;
                      break;
                    }
                }
            }
            /* inner loop broke early: a larger neighbor exists, stop scan */
            if (u < (ssize_t) (width/2))
              break;
          }
          (void) GetMatrixElement(accumulator,x,y,&count);
          if (maxima > count)
            continue;
          /*
            Convert the peak (angle x, biased radius y) into two endpoints
            clipped to the image bounds; near-horizontal angles solve for y,
            otherwise solve for x to avoid dividing by a tiny sin/cos.
          */
          if ((x >= 45) && (x <= 135))
            {
              /*
                y = (r-x cos(t))/sin(t)
              */
              line.x1=0.0;
              line.y1=((double) (y-(accumulator_height/2.0))-((line.x1-
                (image->columns/2.0))*cos(DegreesToRadians((double) x))))/
                sin(DegreesToRadians((double) x))+(image->rows/2.0);
              line.x2=(double) image->columns;
              line.y2=((double) (y-(accumulator_height/2.0))-((line.x2-
                (image->columns/2.0))*cos(DegreesToRadians((double) x))))/
                sin(DegreesToRadians((double) x))+(image->rows/2.0);
            }
          else
            {
              /*
                x = (r-y cos(t))/sin(t)
              */
              line.y1=0.0;
              line.x1=((double) (y-(accumulator_height/2.0))-((line.y1-
                (image->rows/2.0))*sin(DegreesToRadians((double) x))))/
                cos(DegreesToRadians((double) x))+(image->columns/2.0);
              line.y2=(double) image->rows;
              line.x2=((double) (y-(accumulator_height/2.0))-((line.y2-
                (image->rows/2.0))*sin(DegreesToRadians((double) x))))/
                cos(DegreesToRadians((double) x))+(image->columns/2.0);
            }
          (void) FormatLocaleString(message,MaxTextExtent,
            "line %g,%g %g,%g # %g\n",line.x1,line.y1,line.x2,line.y2,maxima);
          if ((size_t)write(file,message,strlen(message)) != strlen(message))
            status=MagickFalse;
        }
    }
  }
  /*
    NOTE(review): status is not re-checked after the writes above; a short
    write still proceeds to render the (possibly truncated) script --
    confirm this is intended.
  */
  (void) close(file);
  /*
    Render lines to image canvas: read the MVG script back as an image,
    forwarding drawing-related artifacts as image options.
  */
  image_info=AcquireImageInfo();
  image_info->background_color=image->background_color;
  (void) FormatLocaleString(image_info->filename,MaxTextExtent,"mvg:%s",path);
  artifact=GetImageArtifact(image,"background");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"background",artifact);
  artifact=GetImageArtifact(image,"fill");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"fill",artifact);
  artifact=GetImageArtifact(image,"stroke");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"stroke",artifact);
  artifact=GetImageArtifact(image,"strokewidth");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"strokewidth",artifact);
  lines_image=ReadImage(image_info,exception);
  /*
    If the "hough-lines:accumulator" artifact is enabled, append the raw
    accumulator as a second image in the returned list.
  */
  artifact=GetImageArtifact(image,"hough-lines:accumulator");
  if ((lines_image != (Image *) NULL) &&
      (IsMagickTrue(artifact) != MagickFalse))
    {
      Image
        *accumulator_image;

      accumulator_image=MatrixToImage(accumulator,exception);
      if (accumulator_image != (Image *) NULL)
        AppendImageToList(&lines_image,accumulator_image);
    }
  /*
    Free resources.
  */
  accumulator=DestroyMatrixInfo(accumulator);
  image_info=DestroyImageInfo(image_info);
  (void) RelinquishUniqueFileResource(path);
  return(GetFirstImageInList(lines_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M e a n S h i f t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MeanShiftImage() delineate arbitrarily shaped clusters in the image. For
% each pixel, it visits all the pixels in the neighborhood specified by
% the window centered at the pixel and excludes those that are outside the
% radius=(window-1)/2 surrounding the pixel. From those pixels, it finds those
% that are within the specified color distance from the current mean, and
% computes a new x,y centroid from those coordinates and a new mean. This new
% x,y centroid is used as the center for a new window. This process iterates
% until it converges and the final mean replaces the (original window
% center) pixel value. It repeats this process for the next pixel, etc.,
% until it processes all pixels in the image. Results are typically better with
% colorspaces other than sRGB. We recommend YIQ, YUV or YCbCr.
%
% The format of the MeanShiftImage method is:
%
% Image *MeanShiftImage(const Image *image,const size_t width,
% const size_t height,const double color_distance,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width, height: find pixels in this neighborhood.
%
% o color_distance: the color distance.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MeanShiftImage(const Image *image,const size_t width,
  const size_t height,const double color_distance,ExceptionInfo *exception)
{
#define MaxMeanShiftIterations 100
#define MeanShiftImageTag "MeanShift/Image"

  CacheView
    *image_view,   /* row-wise reads of the source image */
    *mean_view,    /* writes into the result image */
    *pixel_view;   /* random-access reads while the window wanders */

  Image
    *mean_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /*
    The result is a direct-class clone of the input; each pixel is replaced
    by the converged mean of its mean-shift window.
  */
  mean_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (mean_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(mean_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&mean_image->exception);
      mean_image=DestroyImage(mean_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  pixel_view=AcquireVirtualCacheView(image,exception);
  mean_view=AcquireAuthenticCacheView(mean_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status,progress) \
    magick_threads(mean_image,mean_image,mean_image->rows,1)
#endif
  for (y=0; y < (ssize_t) mean_image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(mean_view,0,y,mean_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    for (x=0; x < (ssize_t) mean_image->columns; x++)
    {
      MagickPixelPacket
        mean_pixel,
        previous_pixel;

      PointInfo
        mean_location,
        previous_location;

      register ssize_t
        i;

      /*
        Seed the iteration with this pixel's own color and coordinates.
      */
      GetMagickPixelPacket(image,&mean_pixel);
      SetMagickPixelPacket(image,p,indexes+x,&mean_pixel);
      mean_location.x=(double) x;
      mean_location.y=(double) y;
      for (i=0; i < MaxMeanShiftIterations; i++)
      {
        double
          distance,
          gamma;

        MagickPixelPacket
          sum_pixel;

        PointInfo
          sum_location;

        ssize_t
          count,
          v;

        sum_location.x=0.0;
        sum_location.y=0.0;
        GetMagickPixelPacket(image,&sum_pixel);
        previous_location=mean_location;
        previous_pixel=mean_pixel;
        count=0;
        /*
          Average the location and color of every window pixel that lies
          inside the radius (v*v+u*u test) AND within color_distance of the
          current mean color.
        */
        for (v=(-((ssize_t) height/2)); v <= (((ssize_t) height/2)); v++)
        {
          ssize_t
            u;

          for (u=(-((ssize_t) width/2)); u <= (((ssize_t) width/2)); u++)
          {
            if ((v*v+u*u) <= (ssize_t) ((width/2)*(height/2)))
              {
                PixelPacket
                  pixel;

                /*
                  NOTE(review): this assignment overwrites the shared status
                  flag from inside the parallel region on every probe --
                  confirm that is intended.
                */
                status=GetOneCacheViewVirtualPixel(pixel_view,(ssize_t)
                  MagickRound(mean_location.x+u),(ssize_t) MagickRound(
                  mean_location.y+v),&pixel,exception);
                distance=(mean_pixel.red-pixel.red)*(mean_pixel.red-pixel.red)+
                  (mean_pixel.green-pixel.green)*(mean_pixel.green-pixel.green)+
                  (mean_pixel.blue-pixel.blue)*(mean_pixel.blue-pixel.blue);
                if (distance <= (color_distance*color_distance))
                  {
                    sum_location.x+=mean_location.x+u;
                    sum_location.y+=mean_location.y+v;
                    sum_pixel.red+=pixel.red;
                    sum_pixel.green+=pixel.green;
                    sum_pixel.blue+=pixel.blue;
                    sum_pixel.opacity+=pixel.opacity;
                    count++;
                  }
              }
          }
        }
        /*
          count should be at least 1: the window center is within color
          distance 0 of itself -- assuming the pixel read above succeeds.
        */
        gamma=1.0/count;
        mean_location.x=gamma*sum_location.x;
        mean_location.y=gamma*sum_location.y;
        mean_pixel.red=gamma*sum_pixel.red;
        mean_pixel.green=gamma*sum_pixel.green;
        mean_pixel.blue=gamma*sum_pixel.blue;
        mean_pixel.opacity=gamma*sum_pixel.opacity;
        /*
          Converged when the squared shift (spatial terms plus color terms
          rescaled toward a 0..255 range) drops to 3.0 or less.
        */
        distance=(mean_location.x-previous_location.x)*
          (mean_location.x-previous_location.x)+
          (mean_location.y-previous_location.y)*
          (mean_location.y-previous_location.y)+
          255.0*QuantumScale*(mean_pixel.red-previous_pixel.red)*
          255.0*QuantumScale*(mean_pixel.red-previous_pixel.red)+
          255.0*QuantumScale*(mean_pixel.green-previous_pixel.green)*
          255.0*QuantumScale*(mean_pixel.green-previous_pixel.green)+
          255.0*QuantumScale*(mean_pixel.blue-previous_pixel.blue)*
          255.0*QuantumScale*(mean_pixel.blue-previous_pixel.blue);
        if (distance <= 3.0)
          break;
      }
      /*
        Replace the original (window center) pixel with the converged mean.
      */
      q->red=ClampToQuantum(mean_pixel.red);
      q->green=ClampToQuantum(mean_pixel.green);
      q->blue=ClampToQuantum(mean_pixel.blue);
      q->opacity=ClampToQuantum(mean_pixel.opacity);
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(mean_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_MeanShiftImage)
#endif
        proceed=SetImageProgress(image,MeanShiftImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  mean_view=DestroyCacheView(mean_view);
  pixel_view=DestroyCacheView(pixel_view);
  image_view=DestroyCacheView(image_view);
  return(mean_image);
}
|
DRB001-antidep1-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
A loop with loop-carried anti-dependence.
Data race pair: a[i+1]@64:10 vs. a[i]@64:5
*/
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
int main(int argc,char *argv[])
{
  int i;
  int len = 1000;
  int a[1000];
  /* Parallel initialization: every iteration writes a distinct a[i],
     so this loop is race-free on its own. */
#pragma omp parallel for private (i)
  for (i = 0; i <= len - 1; i += 1) {
    a[i] = i;
  }
  /* Loop-carried anti-dependence: a[i] (write) vs. a[i+1] (read from the
     next iteration).  Run serially, as here, the result is well defined.
     NOTE(review): the file header declares a data race pair a[i+1]/a[i],
     which implies THIS loop is the one meant to carry the OpenMP pragma
     (as in upstream DRB001); in this copy the pragma sits on the
     race-free init loop instead -- verify against the upstream benchmark. */
  for (i = 0; i <= len - 1 - 1; i += 1) {
    a[i] = a[i + 1] + 1;
  }
  printf("a[500]=%d\n",a[500]);
  return 0;
}
|
5400.c | // this source is derived from CHILL AST originally from file '/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/fdtd-2d/kernel.c' as parsed by frontend compiler rose
/* 2-D FDTD stencil (PolyBench fdtd-2d shape), with the ex and hz updates
 * tiled 32 (rows) x 16 (columns) as emitted by the loop transformer.
 *
 * Per time step:
 *   1. drive boundary row ey[0][*] from the source waveform _fict_,
 *   2. update ey with a backward difference of hz along the row axis,
 *   3. update ex with a backward difference of hz along the column axis,
 *   4. update hz on the interior from the curl of (ex, ey).
 * Tiles in steps 3 and 4 touch disjoint elements, so the parallel loops
 * are race-free.  Statement order inside every loop nest matches the
 * original, so floating-point results are bit-identical.
 */
void kernel_fdtd_2d(int tmax, int nx, int ny, double ex[1000 + 0][1200 + 0], double ey[1000 + 0][1200 + 0], double hz[1000 + 0][1200 + 0], double _fict_[500 + 0]) {
  for (int t = 0; t < tmax; ++t) {
    /* Source boundary condition on the first row of ey. */
    for (int j = 0; j < ny; ++j)
      ey[0][j] = _fict_[t];
    /* ey <- ey - 0.5 * d(hz)/d(row), rows 1..nx-1. */
    for (int i = 1; i < nx; ++i)
      for (int j = 0; j < ny; ++j)
        ey[i][j] -= 0.5 * (hz[i][j] - hz[i - 1][j]);
    /* ex <- ex - 0.5 * d(hz)/d(col), columns 1..ny-1, tiled. */
#pragma omp parallel for
    for (int ib = 0; ib <= nx - 1; ib += 32) {
      int ihi = (ib + 31 < nx - 1) ? ib + 31 : nx - 1;
      for (int i = ib; i <= ihi; ++i)
        for (int jb = 1; jb <= ny - 1; jb += 16) {
          int jhi = (ny - 1 < jb + 15) ? ny - 1 : jb + 15;
          for (int j = jb; j <= jhi; ++j)
            ex[i][j] -= 0.5 * (hz[i][j] - hz[i][j - 1]);
        }
    }
    /* hz <- hz - 0.7 * curl(ex, ey) over the interior, same tiling. */
#pragma omp parallel for
    for (int ib = 0; ib <= nx - 2; ib += 32) {
      int ihi = (ib + 31 < nx - 2) ? ib + 31 : nx - 2;
      for (int i = ib; i <= ihi; ++i)
        for (int jb = 0; jb <= ny - 2; jb += 16) {
          int jhi = (ny - 2 < jb + 15) ? ny - 2 : jb + 15;
          for (int j = jb; j <= jhi; ++j)
            hz[i][j] -= 0.69999999999999996 * (ex[i][j + 1] - ex[i][j] + ey[i + 1][j] - ey[i][j]);
        }
    }
  }
}
|
StmtOpenMP.h | //===- StmtOpenMP.h - Classes for OpenMP directives ------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// \brief This file defines OpenMP AST classes for executable directives and
/// clauses.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_STMTOPENMP_H
#define LLVM_CLANG_AST_STMTOPENMP_H
#include "clang/AST/Expr.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Stmt.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceLocation.h"
namespace clang {
//===----------------------------------------------------------------------===//
// AST classes for directives.
//===----------------------------------------------------------------------===//
/// \brief This is a basic class for representing single OpenMP executable
/// directive.
///
class OMPExecutableDirective : public Stmt {
  friend class ASTStmtReader;
  /// \brief Kind of the directive.
  OpenMPDirectiveKind Kind;
  /// \brief Starting location of the directive (directive keyword).
  SourceLocation StartLoc;
  /// \brief Ending location of the directive.
  SourceLocation EndLoc;
  /// \brief Numbers of clauses.
  const unsigned NumClauses;
  /// \brief Number of child expressions/stmts.
  const unsigned NumChildren;
  /// \brief Offset from this to the start of clauses.
  /// There are NumClauses pointers to clauses, they are followed by
  /// NumChildren pointers to child stmts/exprs (if the directive type
  /// requires an associated stmt, then it has to be the first of them).
  const unsigned ClausesOffset;
  /// \brief Get the clauses storage.
  // The clause pointers live in trailing storage allocated right after this
  // object (see the Create factories); ClausesOffset locates that array.
  MutableArrayRef<OMPClause *> getClauses() {
    OMPClause **ClauseStorage = reinterpret_cast<OMPClause **>(
        reinterpret_cast<char *>(this) + ClausesOffset);
    return MutableArrayRef<OMPClause *>(ClauseStorage, NumClauses);
  }

protected:
  /// \brief Build instance of directive of class \a K.
  ///
  /// \param SC Statement class.
  /// \param K Kind of OpenMP directive.
  /// \param StartLoc Starting location of the directive (directive keyword).
  /// \param EndLoc Ending location of the directive.
  ///
  template <typename T>
  OMPExecutableDirective(const T *, StmtClass SC, OpenMPDirectiveKind K,
                         SourceLocation StartLoc, SourceLocation EndLoc,
                         unsigned NumClauses, unsigned NumChildren)
      : Stmt(SC), Kind(K), StartLoc(std::move(StartLoc)),
        EndLoc(std::move(EndLoc)), NumClauses(NumClauses),
        NumChildren(NumChildren),
        // Clauses start at the first properly-aligned slot after the most
        // derived object of size sizeof(T).
        ClausesOffset(llvm::RoundUpToAlignment(sizeof(T),
                                               llvm::alignOf<OMPClause *>())) {}
  /// \brief Sets the list of variables for this clause.
  ///
  /// \param Clauses The list of clauses for the directive.
  ///
  void setClauses(ArrayRef<OMPClause *> Clauses);
  /// \brief Set the associated statement for the directive.
  ///
  /// \param S Associated statement.
  ///
  void setAssociatedStmt(Stmt *S) {
    assert(hasAssociatedStmt() && "no associated statement.");
    *child_begin() = S;
  }

public:
  /// \brief Iterates over a filtered subrange of clauses applied to a
  /// directive.
  ///
  /// This iterator visits only those declarations that meet some run-time
  /// criteria.
  template <class FilterPredicate> class filtered_clause_iterator {
    ArrayRef<OMPClause *>::const_iterator Current;
    ArrayRef<OMPClause *>::const_iterator End;
    FilterPredicate Pred;
    // Advance Current to the next clause accepted by the predicate (or End).
    void SkipToNextClause() {
      while (Current != End && !Pred(*Current))
        ++Current;
    }

  public:
    typedef const OMPClause *value_type;
    filtered_clause_iterator() : Current(), End() {}
    filtered_clause_iterator(ArrayRef<OMPClause *> Arr, FilterPredicate Pred)
        : Current(Arr.begin()), End(Arr.end()), Pred(Pred) {
      SkipToNextClause();
    }
    value_type operator*() const { return *Current; }
    value_type operator->() const { return *Current; }
    filtered_clause_iterator &operator++() {
      ++Current;
      SkipToNextClause();
      return *this;
    }
    filtered_clause_iterator operator++(int) {
      filtered_clause_iterator tmp(*this);
      ++(*this);
      return tmp;
    }
    // True iff the iterator is exhausted / not exhausted, respectively.
    bool operator!() { return Current == End; }
    operator bool() { return Current != End; }
  };
  /// \brief Gets a single clause of the specified kind \a K associated with the
  /// current directive iff there is only one clause of this kind (and assertion
  /// is fired if there is more than one clause is associated with the
  /// directive). Returns nullptr if no clause of kind \a K is associated with
  /// the directive.
  const OMPClause *getSingleClause(OpenMPClauseKind K) const;
  /// \brief Returns starting location of directive kind.
  SourceLocation getLocStart() const { return StartLoc; }
  /// \brief Returns ending location of directive.
  SourceLocation getLocEnd() const { return EndLoc; }
  /// \brief Set starting location of directive kind.
  ///
  /// \param Loc New starting location of directive.
  ///
  void setLocStart(SourceLocation Loc) { StartLoc = Loc; }
  /// \brief Set ending location of directive.
  ///
  /// \param Loc New ending location of directive.
  ///
  void setLocEnd(SourceLocation Loc) { EndLoc = Loc; }
  /// \brief Get number of clauses.
  unsigned getNumClauses() const { return NumClauses; }
  /// \brief Returns specified clause.
  ///
  /// \param i Number of clause.
  ///
  OMPClause *getClause(unsigned i) const { return clauses()[i]; }
  /// \brief Returns true if directive has associated statement.
  bool hasAssociatedStmt() const { return NumChildren > 0; }
  /// \brief Returns statement associated with the directive.
  // By convention the associated stmt is the first child (see
  // setAssociatedStmt above).
  Stmt *getAssociatedStmt() const {
    assert(hasAssociatedStmt() && "no associated statement.");
    return const_cast<Stmt *>(*child_begin());
  }
  OpenMPDirectiveKind getDirectiveKind() const { return Kind; }
  static bool classof(const Stmt *S) {
    return S->getStmtClass() >= firstOMPExecutableDirectiveConstant &&
           S->getStmtClass() <= lastOMPExecutableDirectiveConstant;
  }
  child_range children() {
    if (!hasAssociatedStmt())
      return child_range();
    // Children are stored immediately after the clause pointer array.
    Stmt **ChildStorage = reinterpret_cast<Stmt **>(getClauses().end());
    return child_range(ChildStorage, ChildStorage + NumChildren);
  }
  ArrayRef<OMPClause *> clauses() { return getClauses(); }
  ArrayRef<OMPClause *> clauses() const {
    return const_cast<OMPExecutableDirective *>(this)->getClauses();
  }
};
/// \brief This represents '#pragma omp parallel' directive.
///
/// \code
/// #pragma omp parallel private(a,b) reduction(+: c,d)
/// \endcode
/// In this example directive '#pragma omp parallel' has clauses 'private'
/// with the variables 'a' and 'b' and 'reduction' with operator '+' and
/// variables 'c' and 'd'.
///
class OMPParallelDirective : public OMPExecutableDirective {
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive (directive keyword).
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                       unsigned NumClauses)
      : OMPExecutableDirective(this, OMPParallelDirectiveClass, OMPD_parallel,
                               StartLoc, EndLoc, NumClauses, 1) {}
  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPParallelDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPParallelDirectiveClass, OMPD_parallel,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement associated with the directive.
  ///
  static OMPParallelDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelDirective *CreateEmpty(const ASTContext &C,
                                           unsigned NumClauses, EmptyShell);
  // LLVM-style RTTI support (isa/dyn_cast).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelDirectiveClass;
  }
};
/// \brief This is a common base class for loop directives ('omp simd', 'omp
/// for', 'omp for simd' etc.). It is responsible for the loop code generation.
///
class OMPLoopDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Number of collapsed loops as specified by 'collapse' clause.
  unsigned CollapsedNum;
  /// \brief Offsets to the stored exprs.
  // Fixed positions of the helper expressions inside the children array;
  // the three CollapsedNum-sized arrays (counters, updates, finals) start
  // at ArraysOffset -- see numLoopChildren().
  enum {
    AssociatedStmtOffset = 0,
    IterationVariableOffset = 1,
    LastIterationOffset = 2,
    CalcLastIterationOffset = 3,
    PreConditionOffset = 4,
    CondOffset = 5,
    SeparatedCondOffset = 6,
    InitOffset = 7,
    IncOffset = 8,
    ArraysOffset = 9
  };
  /// \brief Get the counters storage.
  MutableArrayRef<Expr *> getCounters() {
    Expr **Storage =
        reinterpret_cast<Expr **>(&(*(std::next(child_begin(), ArraysOffset))));
    return MutableArrayRef<Expr *>(Storage, CollapsedNum);
  }
  /// \brief Get the updates storage.
  MutableArrayRef<Expr *> getUpdates() {
    Expr **Storage = reinterpret_cast<Expr **>(
        &*std::next(child_begin(), ArraysOffset + CollapsedNum));
    return MutableArrayRef<Expr *>(Storage, CollapsedNum);
  }
  /// \brief Get the final counter updates storage.
  MutableArrayRef<Expr *> getFinals() {
    Expr **Storage = reinterpret_cast<Expr **>(
        &*std::next(child_begin(), ArraysOffset + 2 * CollapsedNum));
    return MutableArrayRef<Expr *>(Storage, CollapsedNum);
  }

protected:
  /// \brief Build instance of loop directive of class \a Kind.
  ///
  /// \param SC Statement class.
  /// \param Kind Kind of OpenMP directive.
  /// \param StartLoc Starting location of the directive (directive keyword).
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops from 'collapse' clause.
  /// \param NumClauses Number of clauses.
  /// \param NumSpecialChildren Number of additional directive-specific stmts.
  ///
  template <typename T>
  OMPLoopDirective(const T *That, StmtClass SC, OpenMPDirectiveKind Kind,
                   SourceLocation StartLoc, SourceLocation EndLoc,
                   unsigned CollapsedNum, unsigned NumClauses,
                   unsigned NumSpecialChildren = 0)
      : OMPExecutableDirective(That, SC, Kind, StartLoc, EndLoc, NumClauses,
                               numLoopChildren(CollapsedNum) +
                                   NumSpecialChildren),
        CollapsedNum(CollapsedNum) {}
  /// \brief Children number.
  static unsigned numLoopChildren(unsigned CollapsedNum) {
    return ArraysOffset + 3 * CollapsedNum; // Counters, Updates and Finals
  }
  void setIterationVariable(Expr *IV) {
    *std::next(child_begin(), IterationVariableOffset) = IV;
  }
  void setLastIteration(Expr *LI) {
    *std::next(child_begin(), LastIterationOffset) = LI;
  }
  void setCalcLastIteration(Expr *CLI) {
    *std::next(child_begin(), CalcLastIterationOffset) = CLI;
  }
  void setPreCond(Expr *PC) {
    *std::next(child_begin(), PreConditionOffset) = PC;
  }
  void setCond(Expr *Cond, Expr *SeparatedCond) {
    *std::next(child_begin(), CondOffset) = Cond;
    *std::next(child_begin(), SeparatedCondOffset) = SeparatedCond;
  }
  void setInit(Expr *Init) { *std::next(child_begin(), InitOffset) = Init; }
  void setInc(Expr *Inc) { *std::next(child_begin(), IncOffset) = Inc; }
  void setCounters(ArrayRef<Expr *> A);
  void setUpdates(ArrayRef<Expr *> A);
  void setFinals(ArrayRef<Expr *> A);

public:
  /// \brief Get number of collapsed loops.
  unsigned getCollapsedNumber() const { return CollapsedNum; }
  Expr *getIterationVariable() const {
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), IterationVariableOffset)));
  }
  Expr *getLastIteration() const {
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), LastIterationOffset)));
  }
  Expr *getCalcLastIteration() const {
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), CalcLastIterationOffset)));
  }
  Expr *getPreCond() const {
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), PreConditionOffset)));
  }
  Expr *getCond(bool SeparateIter) const {
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(),
                   (SeparateIter ? SeparatedCondOffset : CondOffset))));
  }
  Expr *getInit() const {
    return const_cast<Expr *>(
        reinterpret_cast<const Expr *>(*std::next(child_begin(), InitOffset)));
  }
  Expr *getInc() const {
    return const_cast<Expr *>(
        reinterpret_cast<const Expr *>(*std::next(child_begin(), IncOffset)));
  }
  /// \brief Returns the innermost loop body, descending through the
  /// CollapsedNum nested for-statements.
  const Stmt *getBody() const {
    // This relies on the loop form already having been checked by Sema.
    Stmt *Body = getAssociatedStmt()->IgnoreContainers(true);
    Body = cast<ForStmt>(Body)->getBody();
    for (unsigned Cnt = 1; Cnt < CollapsedNum; ++Cnt) {
      Body = Body->IgnoreContainers();
      Body = cast<ForStmt>(Body)->getBody();
    }
    return Body;
  }
  ArrayRef<Expr *> counters() { return getCounters(); }
  ArrayRef<Expr *> counters() const {
    return const_cast<OMPLoopDirective *>(this)->getCounters();
  }
  ArrayRef<Expr *> updates() { return getUpdates(); }
  ArrayRef<Expr *> updates() const {
    return const_cast<OMPLoopDirective *>(this)->getUpdates();
  }
  ArrayRef<Expr *> finals() { return getFinals(); }
  ArrayRef<Expr *> finals() const {
    return const_cast<OMPLoopDirective *>(this)->getFinals();
  }
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSimdDirectiveClass ||
           T->getStmtClass() == OMPForDirectiveClass ||
           T->getStmtClass() == OMPForSimdDirectiveClass ||
           T->getStmtClass() == OMPParallelForDirectiveClass ||
           T->getStmtClass() == OMPParallelForSimdDirectiveClass;
  }
};
/// \brief This represents '#pragma omp simd' directive.
///
/// \code
/// #pragma omp simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp simd' has clauses 'private'
/// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and
/// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'.
///
class OMPSimdDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                   unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPSimdDirectiveClass, OMPD_simd, StartLoc,
                         EndLoc, CollapsedNum, NumClauses) {}
  /// \brief Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPSimdDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPSimdDirectiveClass, OMPD_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param IV Loop iteration variable for CodeGen.
  /// \param LastIteration Loop last iteration number for CodeGen.
  /// \param CalcLastIteration Calculation of last iteration.
  /// \param PreCond Pre-condition.
  /// \param Cond Condition.
  /// \param SeparatedCond Condition with 1 iteration separated.
  /// \param Inc Loop increment.
  /// \param Counters Loop counters.
  /// \param Updates Expressions for loop counters update for CodeGen.
  /// \param Finals Final loop counter values for CodeGen.
  ///
  static OMPSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, Expr *IV, Expr *LastIteration,
         Expr *CalcLastIteration, Expr *PreCond, Expr *Cond,
         Expr *SeparatedCond, Expr *Init, Expr *Inc, ArrayRef<Expr *> Counters,
         ArrayRef<Expr *> Updates, ArrayRef<Expr *> Finals);
  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
                                       unsigned CollapsedNum, EmptyShell);
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSimdDirectiveClass;
  }
};
/// \brief This represents '#pragma omp for' directive.
///
/// \code
/// #pragma omp for private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp for' has clauses 'private' with the
/// variables 'a' and 'b' and 'reduction' with operator '+' and variables 'c'
/// and 'd'.
///
class OMPForDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                  unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPForDirectiveClass, OMPD_for, StartLoc, EndLoc,
                         CollapsedNum, NumClauses) {}
  /// \brief Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPForDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPForDirectiveClass, OMPD_for, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param IV Loop iteration variable for CodeGen.
  /// \param LastIteration Loop last iteration number for CodeGen.
  /// \param CalcLastIteration Calculation of last iteration.
  /// \param PreCond Pre-condition.
  /// \param Cond Condition.
  /// \param SeparatedCond Condition with 1 iteration separated.
  /// \param Inc Loop increment.
  /// \param Counters Loop counters.
  /// \param Updates Expressions for loop counters update for CodeGen.
  /// \param Finals Final loop counter values for CodeGen.
  ///
  static OMPForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, Expr *IV, Expr *LastIteration,
         Expr *CalcLastIteration, Expr *PreCond, Expr *Cond,
         Expr *SeparatedCond, Expr *Init, Expr *Inc, ArrayRef<Expr *> Counters,
         ArrayRef<Expr *> Updates, ArrayRef<Expr *> Finals);
  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
                                      unsigned CollapsedNum, EmptyShell);
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPForDirectiveClass;
  }
};
/// \brief This represents '#pragma omp for simd' directive.
///
/// \code
/// #pragma omp for simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp for simd' has clauses 'private'
/// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and
/// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'.
///
class OMPForSimdDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                      unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPForSimdDirectiveClass, OMPD_for_simd,
                         StartLoc, EndLoc, CollapsedNum, NumClauses) {}
  /// \brief Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPForSimdDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPForSimdDirectiveClass, OMPD_for_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param IV Loop iteration variable for CodeGen.
  /// \param LastIteration Loop last iteration number for CodeGen.
  /// \param CalcLastIteration Calculation of last iteration.
  /// \param PreCond Pre-condition.
  /// \param Cond Condition.
  /// \param SeparatedCond Condition with 1 iteration separated.
  /// \param Inc Loop increment.
  /// \param Counters Loop counters.
  /// \param Updates Expressions for loop counters update for CodeGen.
  /// \param Finals Final loop counter values for CodeGen.
  ///
  static OMPForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, Expr *IV, Expr *LastIteration,
         Expr *CalcLastIteration, Expr *PreCond, Expr *Cond,
         Expr *SeparatedCond, Expr *Init, Expr *Inc, ArrayRef<Expr *> Counters,
         ArrayRef<Expr *> Updates, ArrayRef<Expr *> Finals);
  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPForSimdDirective *CreateEmpty(const ASTContext &C,
                                          unsigned NumClauses,
                                          unsigned CollapsedNum, EmptyShell);
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPForSimdDirectiveClass;
  }
};
/// \brief This represents '#pragma omp sections' directive.
///
/// \code
/// #pragma omp sections private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp sections' has clauses 'private' with
/// the variables 'a' and 'b' and 'reduction' with operator '+' and variables
/// 'c' and 'd'.
///
class OMPSectionsDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                       unsigned NumClauses)
      : OMPExecutableDirective(this, OMPSectionsDirectiveClass, OMPD_sections,
                               StartLoc, EndLoc, NumClauses, 1) {}
  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPSectionsDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPSectionsDirectiveClass, OMPD_sections,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPSectionsDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPSectionsDirective *CreateEmpty(const ASTContext &C,
                                           unsigned NumClauses, EmptyShell);
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSectionsDirectiveClass;
  }
};
/// \brief This represents '#pragma omp section' directive.
///
/// \code
/// #pragma omp section
/// \endcode
///
class OMPSectionDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  // Takes no clauses (0) and exactly one child: the associated statement.
  OMPSectionDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(this, OMPSectionDirectiveClass, OMPD_section,
                               StartLoc, EndLoc, 0, 1) {}
  /// \brief Build an empty directive.
  ///
  explicit OMPSectionDirective()
      : OMPExecutableDirective(this, OMPSectionDirectiveClass, OMPD_section,
                               SourceLocation(), SourceLocation(), 0, 1) {}

public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPSectionDirective *Create(const ASTContext &C,
                                     SourceLocation StartLoc,
                                     SourceLocation EndLoc,
                                     Stmt *AssociatedStmt);
  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPSectionDirective *CreateEmpty(const ASTContext &C, EmptyShell);
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSectionDirectiveClass;
  }
};
/// \brief This represents '#pragma omp single' directive.
///
/// \code
/// #pragma omp single private(a,b) copyprivate(c,d)
/// \endcode
/// In this example directive '#pragma omp single' has clauses 'private' with
/// the variables 'a' and 'b' and 'copyprivate' with variables 'c' and 'd'.
///
class OMPSingleDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPSingleDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                     unsigned NumClauses)
      : OMPExecutableDirective(this, OMPSingleDirectiveClass, OMPD_single,
                               StartLoc, EndLoc, NumClauses, 1) {}
  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPSingleDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPSingleDirectiveClass, OMPD_single,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPSingleDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPSingleDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSingleDirectiveClass;
  }
};
/// \brief This represents '#pragma omp master' directive.
///
/// \code
/// #pragma omp master
/// \endcode
///
class OMPMasterDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  // Takes no clauses (0) and exactly one child: the associated statement.
  OMPMasterDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(this, OMPMasterDirectiveClass, OMPD_master,
                               StartLoc, EndLoc, 0, 1) {}
  /// \brief Build an empty directive.
  ///
  explicit OMPMasterDirective()
      : OMPExecutableDirective(this, OMPMasterDirectiveClass, OMPD_master,
                               SourceLocation(), SourceLocation(), 0, 1) {}

public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPMasterDirective *Create(const ASTContext &C,
                                    SourceLocation StartLoc,
                                    SourceLocation EndLoc,
                                    Stmt *AssociatedStmt);
  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPMasterDirective *CreateEmpty(const ASTContext &C, EmptyShell);
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPMasterDirectiveClass;
  }
};
/// \brief This represents '#pragma omp critical' directive.
///
/// \code
/// #pragma omp critical
/// \endcode
///
class OMPCriticalDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// \brief Name of the directive (the optional identifier in
  /// '#pragma omp critical (name)').
  DeclarationNameInfo DirName;

  /// \brief Build directive with the given start and end location.
  ///
  /// \param Name Name of the directive.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPCriticalDirective(const DeclarationNameInfo &Name, SourceLocation StartLoc,
                       SourceLocation EndLoc)
      : OMPExecutableDirective(this, OMPCriticalDirectiveClass, OMPD_critical,
                               StartLoc, EndLoc, 0, 1),
        DirName(Name) {}

  /// \brief Build an empty directive.
  ///
  explicit OMPCriticalDirective()
      : OMPExecutableDirective(this, OMPCriticalDirectiveClass, OMPD_critical,
                               SourceLocation(), SourceLocation(), 0, 1),
        DirName() {}

  /// \brief Set name of the directive.
  ///
  /// \param Name Name of the directive.
  ///
  void setDirectiveName(const DeclarationNameInfo &Name) { DirName = Name; }

public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param Name Name of the directive.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPCriticalDirective *
  Create(const ASTContext &C, const DeclarationNameInfo &Name,
         SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AssociatedStmt);

  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPCriticalDirective *CreateEmpty(const ASTContext &C, EmptyShell);

  /// \brief Return name of the directive.
  ///
  DeclarationNameInfo getDirectiveName() const { return DirName; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPCriticalDirectiveClass;
  }
};
/// \brief This represents '#pragma omp parallel for' directive.
///
/// \code
/// #pragma omp parallel for private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel for' has clauses 'private'
/// with the variables 'a' and 'b' and 'reduction' with operator '+' and
/// variables 'c' and 'd'.
///
class OMPParallelForDirective : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                          unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPParallelForDirectiveClass, OMPD_parallel_for,
                         StartLoc, EndLoc, CollapsedNum, NumClauses) {}

  /// \brief Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPParallelForDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPParallelForDirectiveClass, OMPD_parallel_for,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param IV Loop iteration variable for CodeGen.
  /// \param LastIteration Loop last iteration number for CodeGen.
  /// \param CalcLastIteration Calculation of last iteration.
  /// \param PreCond Pre-condition.
  /// \param Cond Condition.
  /// \param SeparatedCond Condition with 1 iteration separated.
  /// \param Init Initialization of the loop iteration variable for CodeGen.
  /// \param Inc Loop increment.
  /// \param Counters Loop counters.
  /// \param Updates Expressions for loop counters update for CodeGen.
  /// \param Finals Final loop counter values for CodeGen.
  ///
  static OMPParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, Expr *IV, Expr *LastIteration,
         Expr *CalcLastIteration, Expr *PreCond, Expr *Cond,
         Expr *SeparatedCond, Expr *Init, Expr *Inc, ArrayRef<Expr *> Counters,
         ArrayRef<Expr *> Updates, ArrayRef<Expr *> Finals);

  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  static OMPParallelForDirective *CreateEmpty(const ASTContext &C,
                                              unsigned NumClauses,
                                              unsigned CollapsedNum,
                                              EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelForDirectiveClass;
  }
};
/// \brief This represents '#pragma omp parallel for simd' directive.
///
/// \code
/// #pragma omp parallel for simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel for simd' has clauses
/// 'private' with the variables 'a' and 'b', 'linear' with variables 'i', 'j'
/// and linear step 's', 'reduction' with operator '+' and variables 'c' and
/// 'd'.
///
class OMPParallelForSimdDirective : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                              unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPParallelForSimdDirectiveClass,
                         OMPD_parallel_for_simd, StartLoc, EndLoc, CollapsedNum,
                         NumClauses) {}

  /// \brief Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPParallelForSimdDirective(unsigned CollapsedNum,
                                       unsigned NumClauses)
      : OMPLoopDirective(this, OMPParallelForSimdDirectiveClass,
                         OMPD_parallel_for_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param IV Loop iteration variable for CodeGen.
  /// \param LastIteration Loop last iteration number for CodeGen.
  /// \param CalcLastIteration Calculation of last iteration.
  /// \param PreCond Pre-condition.
  /// \param Cond Condition.
  /// \param SeparatedCond Condition with 1 iteration separated.
  /// \param Init Initialization of the loop iteration variable for CodeGen.
  /// \param Inc Loop increment.
  /// \param Counters Loop counters.
  /// \param Updates Expressions for loop counters update for CodeGen.
  /// \param Finals Final loop counter values for CodeGen.
  ///
  static OMPParallelForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, Expr *IV, Expr *LastIteration,
         Expr *CalcLastIteration, Expr *PreCond, Expr *Cond,
         Expr *SeparatedCond, Expr *Init, Expr *Inc, ArrayRef<Expr *> Counters,
         ArrayRef<Expr *> Updates, ArrayRef<Expr *> Finals);

  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  static OMPParallelForSimdDirective *CreateEmpty(const ASTContext &C,
                                                  unsigned NumClauses,
                                                  unsigned CollapsedNum,
                                                  EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelForSimdDirectiveClass;
  }
};
/// \brief This represents '#pragma omp parallel sections' directive.
///
/// \code
/// #pragma omp parallel sections private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel sections' has clauses
/// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+'
/// and variables 'c' and 'd'.
///
class OMPParallelSectionsDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPParallelSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                               unsigned NumClauses)
      : OMPExecutableDirective(this, OMPParallelSectionsDirectiveClass,
                               OMPD_parallel_sections, StartLoc, EndLoc,
                               NumClauses, 1) {}

  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPParallelSectionsDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPParallelSectionsDirectiveClass,
                               OMPD_parallel_sections, SourceLocation(),
                               SourceLocation(), NumClauses, 1) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPParallelSectionsDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelSectionsDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelSectionsDirectiveClass;
  }
};
/// \brief This represents '#pragma omp task' directive.
///
/// \code
/// #pragma omp task private(a,b) final(d)
/// \endcode
/// In this example directive '#pragma omp task' has clauses 'private' with the
/// variables 'a' and 'b' and 'final' with condition 'd'.
///
class OMPTaskDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPTaskDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                   unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTaskDirectiveClass, OMPD_task, StartLoc,
                               EndLoc, NumClauses, 1) {}

  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTaskDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTaskDirectiveClass, OMPD_task,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTaskDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation EndLoc,
                                  ArrayRef<OMPClause *> Clauses,
                                  Stmt *AssociatedStmt);

  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTaskDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
                                       EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskDirectiveClass;
  }
};
/// \brief This represents '#pragma omp taskyield' directive.
///
/// \code
/// #pragma omp taskyield
/// \endcode
///
class OMPTaskyieldDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(this, OMPTaskyieldDirectiveClass, OMPD_taskyield,
                               StartLoc, EndLoc, 0,
                               0) {} // standalone: no clauses, no children.

  /// \brief Build an empty directive.
  ///
  explicit OMPTaskyieldDirective()
      : OMPExecutableDirective(this, OMPTaskyieldDirectiveClass, OMPD_taskyield,
                               SourceLocation(), SourceLocation(), 0, 0) {}

public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  static OMPTaskyieldDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);

  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPTaskyieldDirective *CreateEmpty(const ASTContext &C, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskyieldDirectiveClass;
  }
};
/// \brief This represents '#pragma omp barrier' directive.
///
/// \code
/// #pragma omp barrier
/// \endcode
///
class OMPBarrierDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(this, OMPBarrierDirectiveClass, OMPD_barrier,
                               StartLoc, EndLoc, 0,
                               0) {} // standalone: no clauses, no children.

  /// \brief Build an empty directive.
  ///
  explicit OMPBarrierDirective()
      : OMPExecutableDirective(this, OMPBarrierDirectiveClass, OMPD_barrier,
                               SourceLocation(), SourceLocation(), 0, 0) {}

public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  static OMPBarrierDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);

  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPBarrierDirective *CreateEmpty(const ASTContext &C, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPBarrierDirectiveClass;
  }
};
/// \brief This represents '#pragma omp taskwait' directive.
///
/// \code
/// #pragma omp taskwait
/// \endcode
///
class OMPTaskwaitDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(this, OMPTaskwaitDirectiveClass, OMPD_taskwait,
                               StartLoc, EndLoc, 0,
                               0) {} // standalone: no clauses, no children.

  /// \brief Build an empty directive.
  ///
  explicit OMPTaskwaitDirective()
      : OMPExecutableDirective(this, OMPTaskwaitDirectiveClass, OMPD_taskwait,
                               SourceLocation(), SourceLocation(), 0, 0) {}

public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  static OMPTaskwaitDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);

  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPTaskwaitDirective *CreateEmpty(const ASTContext &C, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskwaitDirectiveClass;
  }
};
/// \brief This represents '#pragma omp flush' directive.
///
/// \code
/// #pragma omp flush(a,b)
/// \endcode
/// In this example directive '#pragma omp flush' has 2 arguments: variables 'a'
/// and 'b'.
/// 'omp flush' directive does not have clauses but have an optional list of
/// variables to flush. This list of variables is stored within some fake clause
/// FlushClause.
class OMPFlushDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPFlushDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                    unsigned NumClauses)
      : OMPExecutableDirective(this, OMPFlushDirectiveClass, OMPD_flush,
                               StartLoc, EndLoc, NumClauses,
                               0) {} // standalone directive: no children; the
                                     // flushed variables live in a pseudo
                                     // clause (see class comment above).

  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPFlushDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPFlushDirectiveClass, OMPD_flush,
                               SourceLocation(), SourceLocation(), NumClauses,
                               0) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses (only single OMPFlushClause clause is
  /// allowed).
  ///
  static OMPFlushDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                   SourceLocation EndLoc,
                                   ArrayRef<OMPClause *> Clauses);

  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPFlushDirective *CreateEmpty(const ASTContext &C,
                                        unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPFlushDirectiveClass;
  }
};
/// \brief This represents '#pragma omp ordered' directive.
///
/// \code
/// #pragma omp ordered
/// \endcode
///
class OMPOrderedDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPOrderedDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(this, OMPOrderedDirectiveClass, OMPD_ordered,
                               StartLoc, EndLoc, 0, 1) {}

  /// \brief Build an empty directive.
  ///
  explicit OMPOrderedDirective()
      : OMPExecutableDirective(this, OMPOrderedDirectiveClass, OMPD_ordered,
                               SourceLocation(), SourceLocation(), 0, 1) {}

public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPOrderedDirective *Create(const ASTContext &C,
                                     SourceLocation StartLoc,
                                     SourceLocation EndLoc,
                                     Stmt *AssociatedStmt);

  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPOrderedDirective *CreateEmpty(const ASTContext &C, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPOrderedDirectiveClass;
  }
};
/// \brief This represents '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic capture
/// \endcode
/// In this example directive '#pragma omp atomic' has clause 'capture'.
///
class OMPAtomicDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  // Child layout (4 slots): slot 0 is presumably the associated statement
  // (confirm against OMPExecutableDirective); slots 1..3 hold the 'x', 'v'
  // and 'expr' sub-expressions, as shown by the set*/get* accessors below.

  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPAtomicDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                     unsigned NumClauses)
      : OMPExecutableDirective(this, OMPAtomicDirectiveClass, OMPD_atomic,
                               StartLoc, EndLoc, NumClauses, 4) {}

  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPAtomicDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPAtomicDirectiveClass, OMPD_atomic,
                               SourceLocation(), SourceLocation(), NumClauses,
                               4) {}

  /// \brief Set 'x' part of the associated expression/statement (child 1).
  void setX(Expr *X) { *std::next(child_begin()) = X; }
  /// \brief Set 'v' part of the associated expression/statement (child 2).
  void setV(Expr *V) { *std::next(child_begin(), 2) = V; }
  /// \brief Set 'expr' part of the associated expression/statement (child 3).
  void setExpr(Expr *E) { *std::next(child_begin(), 3) = E; }

public:
  /// \brief Creates directive with a list of \a Clauses and 'x', 'v' and 'expr'
  /// parts of the atomic construct (see Section 2.12.6, atomic Construct, for
  /// detailed description of 'x', 'v' and 'expr').
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param X 'x' part of the associated expression/statement.
  /// \param V 'v' part of the associated expression/statement.
  /// \param E 'expr' part of the associated expression/statement.
  ///
  static OMPAtomicDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *X, Expr *V,
         Expr *E);

  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPAtomicDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);

  /// \brief Get 'x' part of the associated expression/statement.
  /// May be null (cast_or_null) if the part was never set.
  Expr *getX() { return cast_or_null<Expr>(*std::next(child_begin())); }
  const Expr *getX() const {
    return cast_or_null<Expr>(*std::next(child_begin()));
  }
  /// \brief Get 'v' part of the associated expression/statement.
  Expr *getV() { return cast_or_null<Expr>(*std::next(child_begin(), 2)); }
  const Expr *getV() const {
    return cast_or_null<Expr>(*std::next(child_begin(), 2));
  }
  /// \brief Get 'expr' part of the associated expression/statement.
  Expr *getExpr() { return cast_or_null<Expr>(*std::next(child_begin(), 3)); }
  const Expr *getExpr() const {
    return cast_or_null<Expr>(*std::next(child_begin(), 3));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPAtomicDirectiveClass;
  }
};
/// \brief This represents '#pragma omp target' directive.
///
/// \code
/// #pragma omp target if(a)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'if' with
/// condition 'a'.
///
class OMPTargetDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                     unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetDirectiveClass, OMPD_target,
                               StartLoc, EndLoc, NumClauses, 1) {}

  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetDirectiveClass, OMPD_target,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetDirectiveClass;
  }
};
/// \brief This represents '#pragma omp teams' directive.
///
/// \code
/// #pragma omp teams if(a)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'if' with
/// condition 'a'.
///
class OMPTeamsDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                    unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTeamsDirectiveClass, OMPD_teams,
                               StartLoc, EndLoc, NumClauses, 1) {}

  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTeamsDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTeamsDirectiveClass, OMPD_teams,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTeamsDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                   SourceLocation EndLoc,
                                   ArrayRef<OMPClause *> Clauses,
                                   Stmt *AssociatedStmt);

  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTeamsDirective *CreateEmpty(const ASTContext &C,
                                        unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDirectiveClass;
  }
};
} // end namespace clang
#endif
|
GB_unaryop__ainv_fp32_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_fp32_uint64
// op(A') function: GB_tran__ainv_fp32_uint64
// C type: float
// A type: uint64_t
// cast: float cij = (float) aij
// unaryop: cij = -aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
float z = (float) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_FP32 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the AINV (additive inverse) unary operator entrywise:
// Cx [p] = -((float) Ax [p]) for p = 0..anz-1.  Per the GB_CAST_OP macro
// above, the uint64_t entry is first cast to float, then negated in float.
// This file is auto-generated; do not hand-edit the logic.
GrB_Info GB_unop__ainv_fp32_uint64
(
    float *restrict Cx,             // output array, anz entries
    const uint64_t *restrict Ax,    // input array, anz entries
    int64_t anz,                    // number of entries in Ax and Cx
    int nthreads                    // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    // operator/type combination compiled out via GB_control.h
    return (GrB_NO_VALUE) ;
    #else
    // flat loop; iterations are independent, so a static schedule suffices
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;     // Cx [p] = -((float) Ax [p])
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast uint64_t -> float, and apply
// the AINV operator, via the shared template GB_unaryop_transpose.c (the
// GB_* macros defined above configure the template).  Auto-generated file;
// do not hand-edit the logic.
GrB_Info GB_tran__ainv_fp32_uint64
(
    GrB_Matrix C,                       // output matrix
    const GrB_Matrix A,                 // input matrix, transposed into C
    int64_t **Rowcounts,                // workspace from phase 1 (see template)
    GBI_single_iterator Iter,           // iterator over the slices of A
    const int64_t *restrict A_slice,    // partition of A across naslice tasks
    int naslice                         // number of slices/tasks
)
{
    #if GB_DISABLE
    // operator/type combination compiled out via GB_control.h
    return (GrB_NO_VALUE) ;
    #else
    // only phase 2 (numeric) runs here; the template does the real work
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
math.h | /*===---- openmp_wrapper/math.h -------- OpenMP math.h intercept ------ c++ -===
*
* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
* See https://llvm.org/LICENSE.txt for license information.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
// If we are in C++ mode and include <math.h> (not <cmath>) first, we still need
// to make sure <cmath> is read first. The problem otherwise is that we haven't
// seen the declarations of the math.h functions when the system math.h includes
// our cmath overlay. However, our cmath overlay, or better the underlying
// overlay, e.g. CUDA, uses the math.h functions. Since we haven't declared them
// yet we get errors. CUDA avoids this by eagerly declaring all math functions
// (in the __device__ space) but we cannot do this. Instead we break the
// dependence by forcing cmath to go first. While our cmath will in turn include
// this file, the cmath guards will prevent recursion.
#ifdef __cplusplus
#include <cmath>
#endif
#ifndef __CLANG_OPENMP_MATH_H__
#define __CLANG_OPENMP_MATH_H__
#ifndef _OPENMP
#error "This file is for OpenMP compilation only."
#endif
#include_next <math.h>
// We need limits.h for __clang_cuda_math.h below and because it should not hurt
// we include it eagerly here.
#include <limits.h>
// We need stdlib.h because (for now) __clang_cuda_math.h below declares `abs`
// which should live in stdlib.h.
#include <stdlib.h>
#pragma omp begin declare variant match( \
device = {arch(nvptx, nvptx64)}, implementation = {extension(match_any)})
#define __CUDA__
#define __OPENMP_NVPTX__
#include <__clang_cuda_math.h>
#undef __OPENMP_NVPTX__
#undef __CUDA__
#pragma omp end declare variant
#endif
|
test8.c | int g1;
/* Reads and writes the shared global g1 around an OpenMP barrier.  The
 * effect-free expression statements (0+g1, 3+g1, g1+5) are presumably
 * deliberate reads of g1 placed as markers for an analysis tool -- do not
 * "clean them up".  NOTE(review): parameter 'a' is unused; the branch
 * conditions are compile-time constants, so only the if-branch executes.
 * TODO confirm intent against the test harness this file belongs to. */
void foo (int a) {
  0+g1;                   /* read of g1, no side effect */
  if (1) {                /* always taken */
    g1=2;                 /* write before the barrier */
#pragma omp barrier
    3+g1;                 /* read after the barrier */
  } else {                /* statically dead branch */
    g1=4;
    foo(3);               /* recursive call, unreachable at run time */
    g1+5;                 /* read, no side effect */
  }
}
/* Entry point: runs a parallel region whose threads read and write the
 * shared global g1.  Branch conditions are compile-time constants, so all
 * threads take the if-branch (which reaches a barrier inside foo); the
 * else-branch with its two explicit barriers is statically dead.  The
 * effect-free reads (6+g1, 11+g1) are presumably analysis markers -- keep
 * them.  No explicit return: C99 main implicitly returns 0. */
int main() {
#pragma omp parallel
  {
    6+g1;                 /* read before any synchronization */
    if (7) {              /* always taken */
      g1=8;
      foo(9);             /* foo's taken path contains a barrier */
      g1=10;
    } else {              /* statically dead branch */
      11+g1;
#pragma omp barrier
      g1=12;
#pragma omp barrier
      g1=13+g1;
    }
    g1=14;                /* write by every thread after the region body */
  }
}
|
GB_unop__cosh_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__cosh_fp32_fp32)
// op(A') function: GB (_unop_tran__cosh_fp32_fp32)
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = coshf (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = coshf (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = coshf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_COSH || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = coshf (Ax [p]) for all entries of a dense or bitmap array.
// Cx and Ax may be aliased: each index is read once and written once.
// Returns GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE.
GrB_Info GB (_unop_apply__cosh_fp32_fp32)
(
    float *Cx,                  // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense case: every one of the anz entries is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = coshf (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;   // skip entries absent from the bitmap
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = coshf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = coshf (A'): transpose A, cast, and apply the unary operator.  The
// actual loop lives in the shared template GB_unop_transpose.c, which is
// specialized here through the GB_* macros defined above in this file.
GrB_Info GB (_unop_tran__cosh_fp32_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
lock.c | #include <stdio.h>
#include <omp.h>
static omp_lock_t lock;
/* Demonstrates serializing a parallel-for body with an explicit OpenMP
 * lock: every iteration prints its thread id twice ("+" then "-") while
 * holding the global lock, so the pair of lines from one iteration is
 * never interleaved with another iteration's output. */
int main(int argc, char const *argv[])
{
    omp_init_lock(&lock);   /* a lock must be initialized before first use */

    #pragma omp parallel for
    for (size_t iter = 0; iter < 5; iter++) {
        /* critical section: both prints happen under the lock */
        omp_set_lock(&lock);
        printf("%d+\n", omp_get_thread_num());
        printf("%d-\n", omp_get_thread_num());
        omp_unset_lock(&lock);
    }

    /* lock routines always take the lock object by address */
    omp_destroy_lock(&lock);
    return 0;
}
|
convolution_1x1_pack8to1.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// 1x1 stride-1 convolution, pack8 -> pack1 layout.  With a 1x1 kernel,
// im2col degenerates into a pure reshape: the WxH spatial plane becomes a
// single row of W*H columns, after which the packed sgemm does the rest.
static void conv1x1s1_sgemm_pack8to1_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    // shallow copy, then view the plane as one row of w*h columns
    Mat bottom_im2col = bottom_blob;
    bottom_im2col.w = bottom_blob.w * bottom_blob.h;
    bottom_im2col.h = 1;

    im2col_sgemm_pack8to1_avx(bottom_im2col, top_blob, kernel, _bias, opt);
}
// 1x1 stride-2 convolution, pack8 -> pack1: first shrink the input by
// gathering every second pixel of every second row into a dense outw x outh
// blob, then run the stride-1 sgemm path on the shrunk blob.
static void conv1x1s2_sgemm_pack8to1_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;

    // after consuming a row (2*outw input pixels, 8 floats each), skip the
    // row remainder plus one full input row (stride 2 in both directions)
    const int tailstep = (w - 2 * outw + w) * 8;

    Mat bottom_blob_shrinked;
    bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < channels; p++)
    {
        const float* r0 = bottom_blob.channel(p);
        float* outptr = bottom_blob_shrinked.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // copy one pack-8 pixel, then skip the next input pixel
                __m256 _v = _mm256_load_ps(r0);
                _mm256_store_ps(outptr, _v);

                r0 += 16;
                outptr += 8;
            }

            r0 += tailstep;
        }
    }

    conv1x1s1_sgemm_pack8to1_avx(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
|
dropout-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file dropout-inl.h
* \brief
* \author Bing Xu, Da Zheng, Hang Zhang
*/
#ifndef MXNET_OPERATOR_NN_DROPOUT_INL_H_
#define MXNET_OPERATOR_NN_DROPOUT_INL_H_
#include <dmlc/logging.h>
#include <dmlc/parameter.h>
#include <mxnet/operator.h>
#include <map>
#include <vector>
#include <string>
#include <utility>
#include <algorithm>
#include "../mxnet_op.h"
#include "../mshadow_op.h"
#include "../random/sampler.h"
#include "../tensor/elemwise_binary_broadcast_op.h"
#if (MSHADOW_USE_MKL == 1) && defined(_OPENMP) && !defined(__CUDACC__)
#define MXNET_USE_MKL_DROPOUT 1
#endif
#if MXNET_USE_MKL_DROPOUT
#include <omp.h>
#include <mkl_vml_functions.h>
#include <mkl_vsl.h>
#endif // MXNET_USE_MKL_DROPOUT
#define MXNET_USE_CUDNN_DROPOUT MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 7
namespace dropout {
enum DropoutOpInputs {kData};
enum DropoutOpOutputs {kOut, kMask};
enum DropoutOpForwardResource {kRandom};
enum DropoutOpMode {kTraining, kAlways};
} // namespace dropout
namespace mxnet {
namespace op {
const int MAX_DIM = 5;
// User-facing hyper-parameters of the Dropout operator.
struct DropoutParam : public dmlc::Parameter<DropoutParam> {
  float p;                         // fraction of the input dropped while training
  int mode;                        // dropout::kTraining or dropout::kAlways
  mxnet::TShape axes;              // axes sharing one mask (variational dropout)
  dmlc::optional<bool> cudnn_off;  // opt out of the cuDNN backend (ignored with axes)
  DMLC_DECLARE_PARAMETER(DropoutParam) {
    DMLC_DECLARE_FIELD(p).set_default(0.5)
    .set_range(0, 1)
    .describe("Fraction of the input that gets dropped out during training time.");
    DMLC_DECLARE_FIELD(mode)
    .add_enum("training", dropout::kTraining)
    .add_enum("always", dropout::kAlways)
    .set_default(dropout::kTraining)
    .describe("Whether to only turn on dropout during training or to also turn on for inference.");
    DMLC_DECLARE_FIELD(axes).set_default(mxnet::TShape(0, 0))
    .describe("Axes for variational dropout kernel.");
    DMLC_DECLARE_FIELD(cudnn_off).set_default(dmlc::optional<bool>(false))
    .describe("Whether to turn off cudnn in dropout operator. "
              "This option is ignored if axes is specified.");
  }
};  // struct DropoutParam
/*!
 * \brief Dropout operator implementation.
 *
 * Forward/Backward dispatch to one of three backends, tried in order when
 * compiled in and applicable:
 *   - MKL VSL Bernoulli sampling (CPU, element type at least as wide as int),
 *   - cuDNN dropout (GPU, only when axes is empty and cudnn is not disabled),
 *   - a generic RNG kernel, with a broadcast-multiply path when `axes`
 *     requests a shared (variational) mask.
 */
template<typename xpu, typename DType>
class DropoutOp {
#if MXNET_USE_MKL_DROPOUT
  // Fill r[0..n) with Bernoulli(p) samples using MKL VSL, one stream per
  // OpenMP thread; vslSkipAheadStream keeps the per-thread streams disjoint.
  static void BernoulliGenerate(common::random::RandGenerator<cpu, DType> gen,
                                int n, double p, int* r) {
    typename RandGenerator<xpu, DType>::Impl genImpl(&gen, 1);
    // derive a non-negative per-call seed from the parallel generator
    const int seed = 17 + abs(genImpl.rand() % 4096);
    CHECK_GE(seed, 0);
    const int nthr = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
#pragma omp parallel num_threads(nthr)
    {
      const int ithr = omp_get_thread_num();
      const int avg_amount = (n + nthr - 1) / nthr;
      const int my_offset = ithr * avg_amount;
      const int my_amount = std::min(my_offset + avg_amount, n) - my_offset;
      if (my_amount > 0) {
        VSLStreamStatePtr stream;
        vslNewStream(&stream, VSL_BRNG_MCG31, seed);
        vslSkipAheadStream(stream, my_offset);
        viRngBernoulli(VSL_RNG_METHOD_BERNOULLI_ICDF, stream, my_amount, r + my_offset, p);
        vslDeleteStream(&stream);
      }
    }
  }
  static inline bool MKLAvailable() {
    // BernoulliGenerate expects an array int, so for types smaller than int, the mask buffer
    // will be too small, so we can;t use MKL in those cases
    return sizeof(DType) >= sizeof(int);
  }

  // MKL forward pass
  inline void MKLForward(const OpContext &ctx,
                         const std::vector<TBlob> &in_data,
                         const std::vector<TBlob> &out_data) {
    Stream<xpu> *s = ctx.get_stream<xpu>();
    RandGenerator<xpu, DType> *pgen = ctx.requested[0].get_parallel_random<xpu, DType>();
    CHECK_NOTNULL(pgen);
    Tensor<xpu, 2, DType> mask = out_data[dropout::kMask].FlatTo2D<xpu, DType>(s);
    Tensor<xpu, 2, DType> data = in_data[dropout::kData].FlatTo2D<xpu, DType>(s);
    Tensor<xpu, 2, DType> out = out_data[dropout::kOut].FlatTo2D<xpu, DType>(s);
    DType *outptr = out.dptr_;
    DType *dataptr = data.dptr_;
    // the mask buffer is reused as int storage for the Bernoulli samples
    auto maskptr = reinterpret_cast<int *>(mask.dptr_);
    int count = mask.shape_[0] * mask.shape_[1];
    if (sizeof(DType) > sizeof(int)) {
      // allocating new buffer to avoiding memory overlapping between `mask.dptr_` and `maskptr`
      Tensor<xpu, 1, int> temp = ctx.requested[1].get_space_typed<xpu, 1, int>(Shape1(count), s);
      maskptr = temp.dptr_;
    }
    BernoulliGenerate(*pgen, count, this->pkeep_, maskptr);
    const float pk_1 = 1.0f / this->pkeep_;
    // scale kept elements by 1/pkeep (inverted dropout) and record the mask
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
    for (int i = 0; i < count; ++i) {
      const DType maskVal = static_cast<DType>(maskptr[i]) * pk_1;
      outptr[i] = dataptr[i] * maskVal;
      mask.dptr_[i] = maskVal;
    }
  }

  // MKL backward pass: grad_in = grad_out * mask (mask already holds 1/pkeep)
  inline void MKLBackward(const OpContext &ctx,
                          const std::vector<TBlob> &in_grad,
                          const std::vector<TBlob> &out_data,
                          const std::vector<TBlob> &out_grad) {
    Stream<xpu> *s = ctx.get_stream<xpu>();
    Tensor<xpu, 2, DType> grad = out_grad[dropout::kOut].FlatTo2D<xpu, DType>(s);
    Tensor<xpu, 2, DType> mask = out_data[dropout::kMask].FlatTo2D<xpu, DType>(s);
    Tensor<xpu, 2, DType> gdata = in_grad[dropout::kData].FlatTo2D<xpu, DType>(s);
    DType *ingradptr = gdata.dptr_;
    const DType *outgradptr = grad.dptr_;
    const DType *maskptr = mask.dptr_;
    const int count = mask.shape_[0] * mask.shape_[1];
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
    for (int i = 0; i < count; ++i) {
      ingradptr[i] = outgradptr[i] * maskptr[i];
    }
  }
#endif  // #if MXNET_USE_MKL_DROPOUT

 public:
  /*!
   * \brief Dropout kernel, compute dropout tensor
   */
  struct DropoutKernel {
    /*!
     * \brief Dropout kernel function
     * \param id Thread number (0-based representing count)
     * \param gen Random number generator
     * \param N Total number of items in the output
     * \param step Step between items, related to parallelism
     * \param dropout_out Output dropout values
     * \param mask_out  Output mask (is multiplied to create dropout output, may be 0)
     * \param input_data Input data to perform the dropout on
     * \param pkeep Dropout rate (keep when the generated random number is less than this value)
     */
    MSHADOW_XINLINE static void Map(index_t id,
                                    RandGenerator<xpu, DType> gen,
                                    const index_t N,
                                    const index_t step,
                                    DType *dropout_out,
                                    DType *mask_out,
                                    const DType *input_data,
                                    const real_t pkeep) {
      RNG_KERNEL_LOOP(xpu, DType, id, gen, N, step, {
        const real_t rand_num = static_cast<real_t>(genImpl.uniform());
        // mask is 0 or 1/pkeep (inverted dropout scaling)
        mask_out[i] = mshadow_op::threshold_eq::Map<real_t>(rand_num, pkeep) * (1.0f / pkeep);
        dropout_out[i] = input_data[i] * mask_out[i];
      });
    }
  };
  struct BernoulliKernel {
    /*! \brief Bernoulli kernel for generating mask */
    MSHADOW_XINLINE static void Map(index_t id,
                                    RandGenerator<xpu, DType> gen,
                                    const index_t N,
                                    const index_t step,
                                    DType *mask_out,
                                    const real_t pkeep) {
      RNG_KERNEL_LOOP(xpu, DType, id, gen, N, step, {
        const real_t rand_num = static_cast<real_t>(genImpl.uniform());
        mask_out[i] = mshadow_op::threshold::Map<real_t>(rand_num, pkeep) * (1.0f / pkeep);
      });
    }
  };

  // Caches the parameters and, when cuDNN is usable, creates the tensor and
  // dropout descriptors destroyed in the destructor.
  explicit DropoutOp(const DropoutParam &param, Context ctx) {
    this->pkeep_ = 1.0f - param.p;
    this->mode_ = static_cast<dropout::DropoutOpMode>(param.mode);
    this->axes_ = param.axes;
    this->dropout_passthrough_ = true;
#if MXNET_USE_CUDNN_DROPOUT
    this->cudnn_off_ = param.cudnn_off && param.cudnn_off.value();
    this->ctx_ = ctx;
    if (ctx.dev_type == kGPU && this->pkeep_ > 0 && !this->cudnn_off_) {
      dtype_ = mshadow::DataType<DType>::kCudnnFlag;
      CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc_));
      CUDNN_CALL(cudnnCreateTensorDescriptor(&y_desc_));
      CUDNN_CALL(cudnnCreateTensorDescriptor(&dx_desc_));
      CUDNN_CALL(cudnnCreateTensorDescriptor(&dy_desc_));
      CUDNN_CALL(cudnnCreateDropoutDescriptor(&dropout_desc_));
    }
#endif  // MXNET_USE_CUDNN_DROPOUT
  }

  ~DropoutOp() {
#if MXNET_USE_CUDNN_DROPOUT
    // mirror the creation condition in the constructor exactly
    if (this->ctx_.dev_type == kGPU && this->pkeep_ > 0 && !this->cudnn_off_) {
      CUDNN_CALL(cudnnDestroyTensorDescriptor(x_desc_));
      CUDNN_CALL(cudnnDestroyTensorDescriptor(y_desc_));
      CUDNN_CALL(cudnnDestroyTensorDescriptor(dx_desc_));
      CUDNN_CALL(cudnnDestroyTensorDescriptor(dy_desc_));
      CUDNN_CALL(cudnnDestroyDropoutDescriptor(dropout_desc_));
    }
#endif  // MXNET_USE_CUDNN_DROPOUT
  }

#if MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)
  inline bool CuDNNAvailable() {
    return this->pkeep_ > 0 && !this->cudnn_off_;
  }

  // Runs cudnnDropoutForward, treating the blob as a flat 1 x 1 x 1 x Size()
  // tensor; `mask` doubles as cuDNN's reserve space.
  inline void CuDNNForward(const OpContext &ctx,
                           const TBlob &in,
                           const TBlob &mask,
                           const TBlob &out) {
    Stream<xpu> *s = ctx.get_stream<xpu>();

    // set dropout state.
    ctx.requested[0].get_cudnn_dropout_desc(&dropout_desc_, s, 1.0f - this->pkeep_, seed_);

    // describe input/output tensor
    int dim[4], stride[4];
    dim[0] = 1;
    dim[1] = 1;
    dim[2] = 1;
    dim[3] = out.Size();
    stride[0] = out.Size();
    stride[1] = out.Size();
    stride[2] = out.Size();
    stride[3] = 1;
    CUDNN_CALL(cudnnSetTensorNdDescriptor(x_desc_,
                                          dtype_,
                                          4,
                                          dim,
                                          stride));
    CUDNN_CALL(cudnnSetTensorNdDescriptor(y_desc_,
                                          dtype_,
                                          4,
                                          dim,
                                          stride));

    // perform dropout with cudnn
    CUDNN_CALL(cudnnDropoutGetReserveSpaceSize(x_desc_, &dropout_reserve_byte_));
    // cudnn uses bits to record the positions that are dropped, so reserve bytes is always
    // 1/8 of input size.
    CHECK_GE(mask.Size() * sizeof(DType), dropout_reserve_byte_) <<
      "The size of the mask space is smaller than the required cudnn reserved space.";
    CUDNN_CALL(cudnnDropoutForward(s->dnn_handle_,
                                   dropout_desc_,
                                   x_desc_,
                                   in.dptr<DType>(),
                                   y_desc_,
                                   out.dptr<DType>(),
                                   mask.dptr<DType>(),
                                   dropout_reserve_byte_));
  }

  // Runs cudnnDropoutBackward with the reserve space saved by CuDNNForward.
  inline void CuDNNBackward(const OpContext &ctx,
                            const TBlob &out_grad,
                            const TBlob &mask,
                            const TBlob &in_grad) {
    Stream<xpu> *s = ctx.get_stream<xpu>();

    // describe input/output tensor
    int dim[4], stride[4];
    dim[0] = 1;
    dim[1] = 1;
    dim[2] = 1;
    dim[3] = in_grad.Size();
    stride[0] = in_grad.Size();
    stride[1] = in_grad.Size();
    stride[2] = in_grad.Size();
    stride[3] = 1;
    CUDNN_CALL(cudnnSetTensorNdDescriptor(dy_desc_,
                                          dtype_,
                                          4,
                                          dim,
                                          stride));
    CUDNN_CALL(cudnnSetTensorNdDescriptor(dx_desc_,
                                          dtype_,
                                          4,
                                          dim,
                                          stride));

    // perform dropout with cudnn
    CUDNN_CALL(cudnnDropoutBackward(s->dnn_handle_,
                                    dropout_desc_,
                                    dy_desc_,
                                    out_grad.dptr<DType>(),
                                    dx_desc_,
                                    in_grad.dptr<DType>(),
                                    mask.dptr<DType>(),
                                    dropout_reserve_byte_));
  }
#endif  // MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)

  // Forward pass.  Applies dropout when training (or mode == kAlways) and
  // pkeep < 1; otherwise copies the input through unchanged and records the
  // fact in dropout_passthrough_ so Backward can skip the mask multiply.
  void Forward(const OpContext &ctx,
               const std::vector<TBlob> &in_data,
               const std::vector<OpReqType> &req,
               const std::vector<TBlob> &out_data) {
    this->dropout_passthrough_ = true;
    if (req[dropout::kOut] != kNullOp) {
      CHECK_EQ(in_data.size(), 1U);
      if (ctx.is_train) {
        CHECK_EQ(out_data.size(), 2U);
      }
      Stream<xpu> *s = ctx.get_stream<xpu>();
      const TBlob &in = in_data[dropout::kData];
      const TBlob &out = out_data[dropout::kOut];
      const TBlob &mask = out_data[dropout::kMask];
      if (this->pkeep_ < 1 && (ctx.is_train || this->mode_ == dropout::kAlways)) {
        this->dropout_passthrough_ = false;
        if (this->axes_.ndim() == 0) {
          // element-wise mask: try the specialized backends first
#if MXNET_USE_MKL_DROPOUT
          if (MKLAvailable()) {
            MKLForward(ctx, in_data, out_data);
            return;
          }
#endif  // MXNET_USE_MKL_DROPOUT
#if MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)
          if (CuDNNAvailable()) {
            CuDNNForward(ctx, in, mask, out);
            return;
          }
#endif  // MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)
          RandGenerator<xpu, DType> *pgen = ctx.requested[0].get_parallel_random<xpu, DType>();
          CHECK_NOTNULL(pgen);
          CHECK(req[dropout::kOut] != kAddTo);
          LaunchRNG<DropoutKernel, xpu>(s, pgen, out.Size(),
                                        out.dptr<DType>(),
                                        mask.dptr<DType>(),
                                        in.dptr<DType>(),
                                        this->pkeep_);
          return;
        } else {
          // variational dropout: generate a smaller mask, then broadcast-
          // multiply it onto the input
          RandGenerator<xpu, DType> *pgen = ctx.requested[0].get_parallel_random<xpu, DType>();
          CHECK_NOTNULL(pgen);
          // initialize the mask
          LaunchRNG<BernoulliKernel, xpu>(s, pgen, mask.Size(),
                                          mask.dptr<DType>(),
                                          this->pkeep_);
          // broadcast mul
          mxnet::TShape new_lshape, new_rshape, new_oshape;
          int ndim = BinaryBroadcastShapeCompact(in.shape_,
                                                 mask.shape_, out.shape_,
                                                 &new_lshape, &new_rshape, &new_oshape);
          if (!ndim) {
            // shapes already match: plain element-wise multiply
            MXNET_ASSIGN_REQ_SWITCH(req[dropout::kOut], Req, {
              mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch(
                s, out.Size(), out.dptr<DType>(), in.dptr<DType>(),
                mask.dptr<DType>());
            });
          } else {
            BROADCAST_NDIM_SWITCH(ndim, NDim, {
              mshadow::Shape<NDim> oshape = new_oshape.get<NDim>();
              mshadow::Shape<NDim> lstride = mxnet_op::calc_stride(new_lshape.get<NDim>());
              mshadow::Shape<NDim> rstride = mxnet_op::calc_stride(new_rshape.get<NDim>());
              mxnet_op::Kernel<mxnet_op::binary_broadcast_kernel<NDim, mshadow_op::mul>, xpu>::
              template LaunchEx(s, new_oshape.Size(), req[dropout::kOut],
                                lstride, rstride, oshape,
                                in.dptr<DType>(),
                                mask.dptr<DType>(), out.dptr<DType>());
            });
          }
        }
      } else {
        // pass-through: identity copy (or nothing, when writing in place)
        if (req[dropout::kOut] == kWriteInplace) return;

        MXNET_ASSIGN_REQ_SWITCH(req[dropout::kOut], Req, {
          mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, xpu>::Launch(
            s, out.Size(), out.dptr<DType>(), in.dptr<DType>());
        });
      }
    }
  }

  // Backward pass.  When the forward pass applied dropout, multiplies the
  // incoming gradient by the stored mask (broadcasting when axes was set);
  // otherwise copies the gradient through unchanged.
  void Backward(const OpContext &ctx,
                const std::vector<TBlob> &out_grad,
                const std::vector<TBlob> &out_data,
                const std::vector<OpReqType> &req,
                const std::vector<TBlob> &in_grad) {
    using namespace mshadow;
    using namespace mshadow::expr;
    Stream<xpu> *s = ctx.get_stream<xpu>();
    if (!this->dropout_passthrough_) {
      // reset the flag so the next (possibly pass-through) forward starts clean
      this->dropout_passthrough_ = true;
      const TBlob &gdata = in_grad[dropout::kData];
      const TBlob &grad = out_grad[dropout::kOut];
      const TBlob &mask = out_data[dropout::kMask];
      if (this->axes_.ndim() == 0) {
#if MXNET_USE_MKL_DROPOUT
        if (MKLAvailable()) {
          MKLBackward(ctx, in_grad, out_data, out_grad);
          return;
        }
#endif  // MXNET_USE_MKL_DROPOUT
#if MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)
        if (CuDNNAvailable()) {
          CuDNNBackward(ctx, grad, mask, gdata);
          return;
        }
#endif  // MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)
        // standard case for dropout
        CHECK_EQ(grad.Size(), mask.Size());
        MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, {
          mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch(
            s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>(), mask.dptr<DType>());
        });
        return;
      } else {
        // broardcast mul
        mxnet::TShape new_lshape, new_rshape, new_oshape;
        int ndim = BinaryBroadcastShapeCompact(grad.shape_,
                                               mask.shape_, gdata.shape_,
                                               &new_lshape, &new_rshape, &new_oshape);
        if (!ndim) {
          MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, {
            mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch(
              s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>(), mask.dptr<DType>());
          });
        } else {
          BROADCAST_NDIM_SWITCH(ndim, NDim, {
            mshadow::Shape<NDim> oshape = new_oshape.get<NDim>();
            mshadow::Shape<NDim> lstride = mxnet_op::calc_stride(new_lshape.get<NDim>());
            mshadow::Shape<NDim> rstride = mxnet_op::calc_stride(new_rshape.get<NDim>());
            mxnet_op::Kernel<mxnet_op::binary_broadcast_kernel<NDim, mshadow_op::mul>, xpu>::
            template LaunchEx(s, new_oshape.Size(), req[0], lstride, rstride, oshape,
                              grad.dptr<DType>(), mask.dptr<DType>(), gdata.dptr<DType>());
          });
        }
      }
    } else {
      const TBlob& gdata = in_grad[dropout::kData];
      const TBlob& grad = out_grad[dropout::kOut];
      MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, {
        mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, xpu>::Launch(
          s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>());
      });
    }
  }

 private:
  /*! \brief Dropout rate (keep when the generated random number is less than this value) */
  real_t pkeep_;
  /*! \brief Dropout mode */
  dropout::DropoutOpMode mode_;
  /*! \brief Axes on which dropout mask is shared in the form of broadcast multiply */
  mxnet::TShape axes_;
  /*! \brief Flag to record whether forward is executed in pass-through mode */
  bool dropout_passthrough_;
#if MXNET_USE_CUDNN_DROPOUT
  bool cudnn_off_;
  Context ctx_;
  cudnnDataType_t dtype_;
  cudnnDropoutDescriptor_t dropout_desc_;
  uint64_t seed_ = 17 + rand() % 4096;  // NOLINT(runtime/threadsafe_fn)
  size_t dropout_reserve_byte_;
  cudnnTensorDescriptor_t x_desc_, y_desc_, dx_desc_, dy_desc_;
#endif  // MXNET_USE_CUDNN_DROPOUT
};  // class DropoutOp
// Forward entry point registered with the operator framework: resolves the
// runtime dtype, fetches the cached DropoutOp from `state`, and runs Forward.
template<typename xpu>
void DropoutCompute(const OpStatePtr& state,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, {
    DropoutOp<xpu, DType>& op = state.get_state<DropoutOp<xpu, DType>>();
    op.Forward(ctx, inputs, req, outputs);
  });
}
// Backward entry point: repackages the two flat inputs (output gradient and
// saved mask) into the out_grads/out_data slot layout DropoutOp::Backward
// expects, then dispatches on the runtime dtype.
template<typename xpu>
void DropoutGradCompute(const OpStatePtr& state,
                        const OpContext& ctx,
                        const std::vector<TBlob>& inputs,
                        const std::vector<OpReqType>& req,
                        const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 2U);
  CHECK_EQ(outputs.size(), 1);
  CHECK_EQ(req.size(), 1);
  std::vector<TBlob> out_grads(2);
  std::vector<TBlob> out_data(2);
  out_grads[dropout::kOut] = inputs[0];   // dL/dY from the next layer
  out_data[dropout::kMask] = inputs[1];   // mask saved by the forward pass
  MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, {
    DropoutOp<xpu, DType>& op = state.get_state<DropoutOp<xpu, DType>>();
    op.Backward(ctx, out_grads, out_data, req, outputs);
  });
}
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_NN_DROPOUT_INL_H_
|
cfunction.c | #include "core/cfunction.h"
#include <stdlib.h>
#include <omp.h>
#include <math.h>
#include "util/pi.h"
#include "struct/coordlist.h"
#define OUTPUT_MAP_INITIAL_CAP 5
/* Sets output pixel (x, y) to the alpha-weighted average of all input
 * pixels whose coordinates are collected in clist.  Coordinates that fall
 * outside the input image (rgbaimg_get_pixel returns nonzero) contribute
 * nothing.  If no coordinate contributes alpha, the result is fully
 * transparent black.  The summed alpha saturates at UINT8_MAX. */
void _set_out_pixel(
        const rgba_image *input, rgba_image *output,
        size_t x, size_t y,
        coord_list *clist) {
    uint32_t acc_r = 0;
    uint32_t acc_g = 0;
    uint32_t acc_b = 0;
    uint32_t acc_a = 0;
    rgba_pixel result;

    if (clist != NULL) {
        size_t count;
        const coord *coords = clist_as_array(clist, &count);
        for (size_t k = 0; k < count; k++) {
            rgba_pixel px;
            if (rgbaimg_get_pixel(input, coords[k].x, coords[k].y, &px) == 0) {
                /* weight each channel by that pixel's alpha */
                acc_r += px.r * px.a;
                acc_g += px.g * px.a;
                acc_b += px.b * px.a;
                acc_a += px.a;
            }
        }
    }

    if (acc_a > 0) {
        result.r = acc_r / acc_a;
        result.g = acc_g / acc_a;
        result.b = acc_b / acc_a;
        /* accumulated alpha saturates at fully opaque */
        result.a = (acc_a > UINT8_MAX) ? UINT8_MAX : acc_a;
    } else {
        result.r = 0;
        result.g = 0;
        result.b = 0;
        result.a = 0;
    }

    rgbaimg_set_pixel(output, x, y, result);
}
/* Identity transformation: returns z unchanged; `arg` is unused. */
double complex f_identity(double complex z, const void *arg) {
    (void) arg;
    return z;
}
/* Null transformation: maps every input to 0; both parameters are unused. */
double complex f_null(double complex z, const void *arg) {
    (void) z;
    (void) arg;
    return 0;
}
/* Maps pixel coordinate (inx, iny) of `img` to the complex number it
 * represents inside the rectangle spanned by `min` and `max`.  The vertical
 * axis is flipped: row 0 (top of the image) maps to the largest imaginary
 * part.  NOTE(review): maps pixel corners, not centers (no +0.5); confirm
 * this is the intended convention together with complex_to_coord. */
double complex coord_to_complex(
        const rgba_image *img,
        size_t inx, size_t iny,
        double complex min, double complex max) {
    size_t w, h;
    double tx, ty;
    double x, y;
    double x_min = creal(min);
    double x_max = creal(max);
    double y_min = cimag(min);
    double y_max = cimag(max);

    rgbaimg_get_dimensions(img, &w, &h);
    tx = (double) inx / w;
    ty = 1 - (double) iny / h;  /* flip: row 0 is the top */
    y = (1-ty)*y_min + ty*y_max;
    x = (1-tx)*x_min + tx*x_max;
    return x + y*1.0i;
}
/* Maps the complex number z (inside the rectangle [min, max]) to the pixel
 * coordinate of `img` it falls on, inverting coord_to_complex's convention
 * (vertical axis flipped).  Returns false when z lies outside the rectangle
 * or the image has a zero dimension; on success stores a coordinate that is
 * guaranteed to be a valid index into a w x h buffer.
 *
 * Fix: the original rounding could produce *outx == w or *outy == h (for
 * example ty == 0 gives (1-ty)*h + 0.5 == h + 0.5, truncating to h), which
 * callers such as warp_ext use directly as array indices — an out-of-bounds
 * write.  The results are now clamped to the last valid index. */
bool complex_to_coord(
        const rgba_image *img,
        double complex z,
        double complex min, double complex max,
        size_t *outx, size_t *outy) {
    size_t w, h;
    double tx, ty;
    double x = creal(z);
    double y = cimag(z);
    double x_min = creal(min);
    double x_max = creal(max);
    double y_min = cimag(min);
    double y_max = cimag(max);

    rgbaimg_get_dimensions(img, &w, &h);
    if (w == 0 || h == 0) {
        /* an empty image has no addressable pixel */
        return false;
    }
    tx = (x - x_min) / (x_max - x_min);
    ty = (y - y_min) / (y_max - y_min);
    if (tx < 0 || ty < 0 || tx >= 1 || ty >= 1) {
        return false;
    }
    *outx = (size_t) (0.5 + (tx*w));
    *outy = (size_t) (0.5 + ((1-ty) * h));
    /* rounding can land exactly on the far edge; clamp to the last index */
    if (*outx >= w) {
        *outx = w - 1;
    }
    if (*outy >= h) {
        *outy = h - 1;
    }
    return true;
}
/* Maps a complex number to a hue/value pair for domain coloring: hue is the
 * argument of z rescaled from (-pi, pi] into [0, 360), value is
 * 1 - exp(-|z|), so magnitude 0 is black and large magnitudes tend to 1. */
void complex_to_color(
        double complex z,
        float *hue,
        float *value) {
    *hue = 360.0f * (carg(z) / (2*M_PI));
    *hue = fmodf(*hue + 360.0f, 360.0f);  /* normalize into [0, 360) */
    *value = 1-exp(-cabs(z));
}
/* Convenience wrapper around warp_ext: both the input and output views span
 * the unit square [0,1]x[0,1] of the complex plane, and the output keeps
 * the input's dimensions (0 means "same as input" in warp_ext).  The
 * 0.0j/1.0j literals are GCC imaginary-constant extensions. */
rgba_image *warp(
        const rgba_image *input, complex_f transformation, const void *arg) {
    return warp_ext(
            input, transformation, arg,
            0+0.0j, 1+1.0j,
            0+0.0j, 1+1.0j,
            0, 0);
}
/* Warps `input` through `transformation`: every input pixel's complex
 * coordinate z0 (inside [min_in, max_in]) is transformed to
 * z1 = transformation(z0, arg), and the input coordinate is recorded in a
 * per-output-pixel hit list; each output pixel then becomes the
 * alpha-weighted average of the input pixels that hit it.
 * out_width/out_height of 0 default to the input's dimensions.
 * Returns a newly created image the caller owns.
 *
 * Fixes:
 *  - the output image was created BEFORE out_width/out_height were
 *    defaulted, so a 0 argument (as passed by warp()) produced a 0x0
 *    output; the defaulting now happens first.
 *  - the three back-to-back `omp critical` sections are merged into one:
 *    allocation and insertion now happen atomically as a unit. */
rgba_image *warp_ext(
        const rgba_image *input, complex_f transformation, const void *arg,
        double complex min_in, double complex max_in,
        double complex min_out, double complex max_out,
        size_t out_width, size_t out_height) {
    /* Coordinates for scanning input image */
    double complex z0;
    /* Coordinates for mapping into output image */
    double complex z1;
    coord c0;
    size_t idx;
    size_t i0, j0;
    size_t i1, j1;
    size_t in_width, in_height;
    size_t in_n_pixels;
    /*
     * A matrix of lists such that, if
     *     (i0, j0) \in mapping[i1][j1]
     * then the complex numbers z0, corresponding to (j0, i0) in the input
     * space, and z1, corresponding to (j1, i1), satisfy:
     *     z1 = transformation(z0)
     *
     * This is used later on to determine the color of a given pixel
     * in the output image given the coordinates which hit it.
     */
    coord_list ***mapping;
    rgba_image *output;

    rgbaimg_get_dimensions(input, &in_width, &in_height);
    in_n_pixels = in_width * in_height;

    /* Default the output size to the input size BEFORE creating the output
     * image (the original created it first, yielding a 0x0 image). */
    if (out_width == 0) {
        out_width = in_width;
    }
    if (out_height == 0) {
        out_height = in_height;
    }
    output = rgbaimg_create(out_width, out_height);

    /* All mapping rows set to NULL until it is hit */
    mapping = calloc(out_height, sizeof(*mapping));

    /* Loop through a lattice in the input space */
    #pragma omp parallel for\
            num_threads(4)\
            private(z0, z1, c0, i0, j0, i1, j1)\
            schedule(static)
    for (idx = 0; idx < in_n_pixels; idx++) {
        i0 = idx / in_width;
        j0 = idx % in_width;
        z0 = coord_to_complex(input, j0, i0, min_in, max_in);

        /* Apply function */
        z1 = transformation(z0, arg);
        if (complex_to_coord(output, z1, min_out, max_out, &j1, &i1)) {
            c0.x = j0;
            c0.y = i0;
            /* One critical section covers row allocation, list creation and
             * insertion, so the data structure is updated atomically. */
            #pragma omp critical
            {
                if (mapping[i1] == NULL) {
                    mapping[i1] = calloc(out_width, sizeof(**mapping));
                }
                if (mapping[i1][j1] == NULL) {
                    mapping[i1][j1] = clist_create(OUTPUT_MAP_INITIAL_CAP);
                }
                clist_add(mapping[i1][j1], c0);
            }
        }
    }

    /* Set output pixels and destroy coordinate lists */
    for (i0 = 0; i0 < out_height; i0++) {
        if (mapping[i0] != NULL) {
            for (j0 = 0; j0 < out_width; j0++) {
                if (mapping[i0][j0] != NULL) {
                    _set_out_pixel(input, output, j0, i0, mapping[i0][j0]);
                    clist_destroy(mapping[i0][j0]);
                }
            }
            free(mapping[i0]);
        }
    }
    free(mapping);

    return output;
}
/* Convenience wrapper around imprint_ext: stamps `color` onto the canvas
 * using the default complex view [-1-i, +1+i]. */
void imprint(rgba_image *canvas, color_f color, const void *arg) {
    imprint_ext(
            canvas,
            (-1-1.0i), (+1+1.0i),
            color, arg);
}
/* Stamps a procedural color function onto `canvas` in place.  Each pixel's
 * complex coordinate (inside [min, max]) is passed to `color`, which edits a
 * copy of the current pixel; the edited color is then alpha-blended over the
 * existing pixel using the alpha `color` produced, while the canvas pixel's
 * original alpha channel is preserved. */
void imprint_ext(
        rgba_image *canvas,
        double complex min, double complex max,
        color_f color, const void *arg) {
    size_t width, height;
    size_t i, j;
    double complex z;
    double alpha;
    rgba_pixel cur_pixel, new_pixel;

    rgbaimg_get_dimensions(canvas, &width, &height);
    for (i = 0; i < height; i++) {
        for (j = 0; j < width; j++) {
            z = coord_to_complex(canvas, j, i, min, max);
            rgbaimg_get_pixel(canvas, j, i, &cur_pixel);
            new_pixel = cur_pixel;
            color(&new_pixel, z, arg);  /* callback may edit the copy */
            /* blend the callback's color over the old one by its alpha */
            alpha = (double) new_pixel.a / UINT8_MAX;
            new_pixel.r = alpha * new_pixel.r + (1-alpha) * cur_pixel.r;
            new_pixel.g = alpha * new_pixel.g + (1-alpha) * cur_pixel.g;
            new_pixel.b = alpha * new_pixel.b + (1-alpha) * cur_pixel.b;
            new_pixel.a = cur_pixel.a;  /* canvas alpha is preserved */
            rgbaimg_set_pixel(canvas, j, i, new_pixel);
        }
    }
}
target-29.c | #include <omp.h>
#include <stdlib.h>
struct S { char p[64]; int a; int b[2]; long c[4]; int *d; char q[64]; };
/* OpenMP offloading conformance test: maps individual struct members (s.a,
 * s.b) and array sections (s.c[1:2], s.d[-2:3] — negative offsets relative
 * to the s.d pointer) through target, target data, target update, and
 * target enter/exit data constructs, checking host/device coherence after
 * each phase.  `sep` detects separate device memory: it is mapped map(to:)
 * and zeroed on the device, so it stays 1 on the host only when the device
 * has its own memory; the omp_target_is_present checks are gated on it. */
__attribute__((noinline, noclone)) void
foo (struct S s)
{
  int d = omp_get_default_device ();
  int id = omp_get_initial_device ();
  int sep = 1;
  if (d < 0 || d >= omp_get_num_devices ())
    d = id;
  int err;
  /* Phase 1: plain target with tofrom maps; verify values both ways.  */
  #pragma omp target map(tofrom: s.a, s.b, s.c[1:2], s.d[-2:3]) map(to: sep) map(from: err)
  {
    err = s.a != 11 || s.b[0] != 12 || s.b[1] != 13;
    err |= s.c[1] != 15 || s.c[2] != 16 || s.d[-2] != 18 || s.d[-1] != 19 || s.d[0] != 20;
    s.a = 35; s.b[0] = 36; s.b[1] = 37;
    s.c[1] = 38; s.c[2] = 39; s.d[-2] = 40; s.d[-1] = 41; s.d[0] = 42;
    sep = 0;  /* visible on the host only with shared memory */
  }
  if (err) abort ();
  err = s.a != 35 || s.b[0] != 36 || s.b[1] != 37;
  err |= s.c[1] != 38 || s.c[2] != 39 || s.d[-2] != 40 || s.d[-1] != 41 || s.d[0] != 42;
  if (err) abort ();
  s.a = 50; s.b[0] = 49; s.b[1] = 48;
  s.c[1] = 47; s.c[2] = 46; s.d[-2] = 45; s.d[-1] = 44; s.d[0] = 43;
  if (sep
      && (omp_target_is_present (&s.a, d)
          || omp_target_is_present (s.b, d)
          || omp_target_is_present (&s.c[1], d)
          || omp_target_is_present (s.d, d)
          || omp_target_is_present (&s.d[-2], d)))
    abort ();
  /* Phase 2: target data with alloc maps plus explicit update to/from.  */
  #pragma omp target data map(alloc: s.a, s.b, s.c[1:2], s.d[-2:3])
  {
    if (!omp_target_is_present (&s.a, d)
        || !omp_target_is_present (s.b, d)
        || !omp_target_is_present (&s.c[1], d)
        || !omp_target_is_present (s.d, d)
        || !omp_target_is_present (&s.d[-2], d))
      abort ();
    #pragma omp target update to(s.a, s.b, s.c[1:2], s.d[-2:3])
    #pragma omp target map(alloc: s.a, s.b, s.c[1:2], s.d[-2:3]) map(from: err)
    {
      err = s.a != 50 || s.b[0] != 49 || s.b[1] != 48;
      err |= s.c[1] != 47 || s.c[2] != 46 || s.d[-2] != 45 || s.d[-1] != 44 || s.d[0] != 43;
      s.a = 17; s.b[0] = 18; s.b[1] = 19;
      s.c[1] = 20; s.c[2] = 21; s.d[-2] = 22; s.d[-1] = 23; s.d[0] = 24;
    }
    #pragma omp target update from(s.a, s.b, s.c[1:2], s.d[-2:3])
  }
  if (sep
      && (omp_target_is_present (&s.a, d)
          || omp_target_is_present (s.b, d)
          || omp_target_is_present (&s.c[1], d)
          || omp_target_is_present (s.d, d)
          || omp_target_is_present (&s.d[-2], d)))
    abort ();
  if (err) abort ();
  err = s.a != 17 || s.b[0] != 18 || s.b[1] != 19;
  err |= s.c[1] != 20 || s.c[2] != 21 || s.d[-2] != 22 || s.d[-1] != 23 || s.d[0] != 24;
  if (err) abort ();
  s.a = 33; s.b[0] = 34; s.b[1] = 35;
  s.c[1] = 36; s.c[2] = 37; s.d[-2] = 38; s.d[-1] = 39; s.d[0] = 40;
  /* Phase 3: unstructured enter/exit data with always modifiers.  */
  #pragma omp target enter data map(alloc: s.a, s.b, s.c[1:2], s.d[-2:3])
  if (!omp_target_is_present (&s.a, d)
      || !omp_target_is_present (s.b, d)
      || !omp_target_is_present (&s.c[1], d)
      || !omp_target_is_present (s.d, d)
      || !omp_target_is_present (&s.d[-2], d))
    abort ();
  #pragma omp target enter data map(always, to: s.a, s.b, s.c[1:2], s.d[-2:3])
  #pragma omp target map(alloc: s.a, s.b, s.c[1:2], s.d[-2:3]) map(from: err)
  {
    err = s.a != 33 || s.b[0] != 34 || s.b[1] != 35;
    err |= s.c[1] != 36 || s.c[2] != 37 || s.d[-2] != 38 || s.d[-1] != 39 || s.d[0] != 40;
    s.a = 49; s.b[0] = 48; s.b[1] = 47;
    s.c[1] = 46; s.c[2] = 45; s.d[-2] = 44; s.d[-1] = 43; s.d[0] = 42;
  }
  #pragma omp target exit data map(always, from: s.a, s.b, s.c[1:2], s.d[-2:3])
  if (!omp_target_is_present (&s.a, d)
      || !omp_target_is_present (s.b, d)
      || !omp_target_is_present (&s.c[1], d)
      || !omp_target_is_present (s.d, d)
      || !omp_target_is_present (&s.d[-2], d))
    abort ();
  #pragma omp target exit data map(release: s.a, s.b, s.c[1:2], s.d[-2:3])
  if (sep
      && (omp_target_is_present (&s.a, d)
          || omp_target_is_present (s.b, d)
          || omp_target_is_present (&s.c[1], d)
          || omp_target_is_present (s.d, d)
          || omp_target_is_present (&s.d[-2], d)))
    abort ();
  if (err) abort ();
  err = s.a != 49 || s.b[0] != 48 || s.b[1] != 47;
  err |= s.c[1] != 46 || s.c[2] != 45 || s.d[-2] != 44 || s.d[-1] != 43 || s.d[0] != 42;
  if (err) abort ();
}
int
main ()
{
  int d[3] = { 18, 19, 20 };
  /* s.d points at d + 2 so that the sections s.d[-2:3] used in foo cover
     exactly d[0..2]; the p/q padding arrays flank the mapped members.  */
  struct S s = { {}, 11, { 12, 13 }, { 14, 15, 16, 17 }, d + 2, {} };
  foo (s);
  return 0;
}
|
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/Attr.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/LocInfoType.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include <deque>
#include <memory>
#include <string>
#include <vector>
// HLSL Change Starts
#include "llvm/Support/OacrIgnoreCond.h" // HLSL Change - all sema use is heavily language-dependant
namespace hlsl {
struct UnusualAnnotation;
}
// HLSL Change Ends
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
class InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class AttributeList;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class ExternalSemaSource;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPClause;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
  /// The first pointer declarator (of any pointer kind) in the file that does
  /// not have a corresponding nullability annotation.
  SourceLocation PointerLoc;
  /// Which kind of pointer declarator we saw.
  /// Zero-initialized so that a default-constructed record -- such as the
  /// one created by DenseMap::operator[] or FileNullabilityMap's cache
  /// slot -- does not carry (and later copy) an indeterminate value.
  uint8_t PointerKind = 0;
  /// Whether we saw any type nullability annotations in the given file.
  bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
  /// A mapping from file IDs to the nullability information for each file ID.
  llvm::DenseMap<FileID, FileNullability> Map;

  /// A single-element cache keyed on the file ID.
  struct {
    FileID File;
    FileNullability Nullability;
  } Cache;

public:
  FileNullability &operator[](FileID file) {
    // On a cache miss, write the cached entry back into the map (unless the
    // cache slot is still in its pristine, invalid-file state) and then load
    // the requested file's record into the cache.
    if (file != Cache.File) {
      if (!Cache.File.isInvalid())
        Map[Cache.File] = Cache.Nullability;
      Cache.File = file;
      Cache.Nullability = Map[file];
    }
    return Cache.Nullability;
  }
};
/// Sema - This implements semantic analysis and AST building for C.
class Sema {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
///\brief Source of additional semantic information.
ExternalSemaSource *ExternalSource;
///\brief Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
                                  const NamedDecl *New) {
  // We are about to link these. It is now safe to compute the linkage of
  // the new decl. If the new decl has external linkage, we will
  // link it with the hidden decl (which also has external linkage) and
  // it will keep having external linkage. If it has internal linkage, we
  // will not link it. Since it has no previous decls, it will remain
  // with internal linkage.
  if (!getLangOpts().ModulesHideInternalLinkage)
    return true;
  return isVisible(Old) || New->isExternallyVisible();
}
public:
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions FPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
/// \brief Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// \brief Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// \brief Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
/// PackContext - Manages the stack for \#pragma pack. An alignment
/// of 0 indicates default alignment.
void *PackContext; // Really a "PragmaPackStack*"
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// \brief Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
// HLSL Change Begin - pragma pack_matrix.
// Add both row/col to identify the default case which no pragma.
bool PackMatrixRowMajorPragmaOn = false; // True when \#pragma pack_matrix(row_major) on.
bool PackMatrixColMajorPragmaOn = false; // True when \#pragma pack_matrix(column_major) on.
// HLSL Change End.
enum PragmaVtorDispKind {
PVDK_Push, ///< #pragma vtordisp(push, mode)
PVDK_Set, ///< #pragma vtordisp(mode)
PVDK_Pop, ///< #pragma vtordisp(pop)
PVDK_Reset ///< #pragma vtordisp()
};
enum PragmaMsStackAction {
PSK_Reset, // #pragma ()
PSK_Set, // #pragma ("name")
PSK_Push, // #pragma (push[, id])
PSK_Push_Set, // #pragma (push[, id], "name")
PSK_Pop, // #pragma (pop[, id])
PSK_Pop_Set, // #pragma (pop[, id], "name")
};
/// \brief Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
///
/// The stack always has at least one element in it.
SmallVector<MSVtorDispAttr::Mode, 2> VtorDispModeStack;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// \brief Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
/// A generic stack for \#pragma state of type ValueType, supporting the
/// MS-style set/push/pop-with-label protocol described by
/// PragmaMsStackAction. Used below for the data_seg/bss_seg/const_seg/
/// code_seg pragma state.
template<typename ValueType>
struct PragmaStack {
  /// One pushed entry: the optional label given at push time, the saved
  /// value, and the location of the pragma that pushed it.
  struct Slot {
    llvm::StringRef StackSlotLabel;
    ValueType Value;
    SourceLocation PragmaLocation;
    Slot(llvm::StringRef StackSlotLabel,
         ValueType Value,
         SourceLocation PragmaLocation)
      : StackSlotLabel(StackSlotLabel), Value(Value),
        PragmaLocation(PragmaLocation) {}
  };
  /// Apply the given stack action (see PragmaMsStackAction) observed at
  /// PragmaLocation. Defined out of line.
  void Act(SourceLocation PragmaLocation,
           PragmaMsStackAction Action,
           llvm::StringRef StackSlotLabel,
           ValueType Value);
  explicit PragmaStack(const ValueType &Value)
    : CurrentValue(Value) {}
  SmallVector<Slot, 2> Stack;           // saved (pushed) entries
  ValueType CurrentValue;               // value currently in effect
  SourceLocation CurrentPragmaLocation; // location of the last pragma applied
};
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// \brief This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// \brief Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// ExprNeedsCleanups - True if the current evaluation context
/// requires cleanups to be run at its conclusion.
bool ExprNeedsCleanups;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression. The
/// element type here is ExprWithCleanups::Object.
SmallVector<BlockDecl*, 8> ExprCleanupObjects;
/// \brief Store a list of either DeclRefExprs or MemberExprs
/// that contain a reference to a variable (constant) that may or may not
/// be odr-used in this Expr, and we won't know until all lvalue-to-rvalue
/// and discarded value conversions have been applied to all subexpressions
/// of the enclosing full expression. This is cleared at the end of each
/// full expression.
llvm::SmallPtrSet<Expr*, 2> MaybeODRUseExprs;
/// \brief Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
///
/// This array is never empty. Clients should ignore the first
/// element, which is used to cache a single FunctionScopeInfo
/// that's used to parse every top-level function.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list all the extended vector types. This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<const NamedDecl*, 16> NamedDeclSetType;
/// \brief Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// \brief Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// \brief Delete-expressions to be analyzed at the end of translation unit
///
/// This list contains class members, and locations of delete-expressions
/// that could not be proven as to whether they mismatch with new-expression
/// used in initializer of the field.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// \brief Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// \brief All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// \brief The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// \brief All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// \brief All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedExceptionSpecChecks;
/// \brief All the members seen during a class definition which were both
/// explicitly defaulted and had explicitly-specified exception
/// specifications, along with the function type containing their
/// user-specified exception specification. Those exception specifications
/// were overridden with the default specifications, but we still need to
/// check whether they are compatible with the default specification, and
/// we can't do that until the nesting set of class definitions is complete.
SmallVector<std::pair<CXXMethodDecl*, const FunctionProtoType*>, 2>
DelayedDefaultedMemberExceptionSpecs;
typedef llvm::MapVector<const FunctionDecl *, LateParsedTemplate *>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// \brief Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;
/// Register the parser callbacks (and the opaque parser pointer passed back
/// to them) used to lazily parse templated function bodies on demand.
void SetLateTemplateParser(LateTemplateParserCB *LTP,
                           LateTemplateParserCleanupCB *LTPCleanup,
                           void *P) {
  OpaqueParser = P;
  LateTemplateParser = LTP;
  LateTemplateParserCleanup = LTPCleanup;
}
// Forward declaration so DelayedDiagnosticsState can befriend the nested
// DelayedDiagnostics class defined below.
class DelayedDiagnostics;

/// Opaque saved state for DelayedDiagnostics: records the pool that was
/// current before a push/pushUndelayed, restored by the matching pop.
/// Only DelayedDiagnostics itself may touch the saved pool.
class DelayedDiagnosticsState {
  sema::DelayedDiagnosticPool *SavedPool;
  friend class Sema::DelayedDiagnostics;
};

// Aliases naming the two processing phases that save/restore this state.
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
  /// \brief The current pool of diagnostics into which delayed
  /// diagnostics should go.
  sema::DelayedDiagnosticPool *CurPool;

public:
  DelayedDiagnostics() : CurPool(nullptr) {}

  /// Adds a delayed diagnostic.
  void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h

  /// Determines whether diagnostics should be delayed.
  bool shouldDelayDiagnostics() { return CurPool != nullptr; }

  /// Returns the current delayed-diagnostics pool.
  sema::DelayedDiagnosticPool *getCurrentPool() const {
    return CurPool;
  }

  /// Enter a new scope. Access and deprecation diagnostics will be
  /// collected in this pool.
  DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
    DelayedDiagnosticsState Saved;
    Saved.SavedPool = CurPool;
    CurPool = &pool;
    return Saved;
  }

  /// Leave a delayed-diagnostic state that was previously pushed.
  /// Do not emit any of the diagnostics. This is performed as part
  /// of the bookkeeping of popping a pool "properly".
  void popWithoutEmitting(DelayedDiagnosticsState state) {
    CurPool = state.SavedPool;
  }

  /// Enter a new scope where access and deprecation diagnostics are
  /// not delayed.
  DelayedDiagnosticsState pushUndelayed() {
    DelayedDiagnosticsState Saved;
    Saved.SavedPool = CurPool;
    CurPool = nullptr;
    return Saved;
  }

  /// Undo a previous pushUndelayed().
  void popUndelayed(DelayedDiagnosticsState state) {
    assert(CurPool == nullptr);
    CurPool = state.SavedPool;
  }
} DelayedDiagnostics;
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
  Sema &S;
  DeclContext *SavedContext;
  ProcessingContextState SavedContextState;
  QualType SavedCXXThisTypeOverride;

public:
  ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
      : S(S), SavedContext(S.CurContext),
        SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
        SavedCXXThisTypeOverride(S.CXXThisTypeOverride) {
    assert(ContextToPush && "pushing null context");
    S.CurContext = ContextToPush;
    if (NewThisContext)
      S.CXXThisTypeOverride = QualType();
  }

  /// Restore the saved context, diagnostics state, and 'this' type override.
  /// Safe to call more than once; only the first call has an effect.
  void pop() {
    if (SavedContext) {
      S.CurContext = SavedContext;
      S.DelayedDiagnostics.popUndelayed(SavedContextState);
      S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
      SavedContext = nullptr;
    }
  }

  ~ContextRAII() { pop(); }
};
/// \brief RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
  Sema &S;
  // Pushes DC as the current declaration context on construction and
  // restores the previous one on destruction.
  Sema::ContextRAII SavedContext;
public:
  SynthesizedFunctionScope(Sema &S, DeclContext *DC)
    : S(S), SavedContext(S, DC)
  {
    // Enter a fresh function scope and a potentially-evaluated expression
    // context for the synthesized body.
    S.PushFunctionScope();
    S.PushExpressionEvaluationContext(Sema::PotentiallyEvaluated);
  }
  ~SynthesizedFunctionScope() {
    // Pop in reverse order of the pushes performed in the constructor.
    S.PopExpressionEvaluationContext();
    S.PopFunctionScopeInfo();
  }
};
/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before declared. rare. may alias another
/// identifier, declared or undeclared
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// \brief Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// \brief The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// \brief The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// \brief The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// \brief The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// \brief The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// \brief Caches identifiers/selectors for NSFoundation APIs.
// std::unique_ptr<NSAPI> NSAPIObj; // HLSL Change
/// \brief The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// \brief The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// \brief Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// \brief Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// \brief The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// \brief The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// \brief Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// \brief The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// \brief The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// \brief The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// \brief The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// \brief The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// \brief The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// \brief id<NSCopying> type.
QualType QIDNSCopying;
/// \brief will hold 'respondsToSelector:'
Selector RespondsToSelectorSel;
/// \brief counter for internal MS Asm label names.
unsigned MSAsmLabelNameCounter;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// A flag to indicate that we're in a context that permits abstract
/// references to fields. This is really a
bool AllowAbstractFieldReference;
/// \brief Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum ExpressionEvaluationContext {
/// \brief The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// \brief The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// \brief The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// \brief The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// \brief The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
/// \brief Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
  /// \brief The expression evaluation context.
  ExpressionEvaluationContext Context;
  /// \brief Whether the enclosing context needed a cleanup.
  bool ParentNeedsCleanups;
  /// \brief Whether we are in a decltype expression.
  bool IsDecltype;
  /// \brief The number of active cleanup objects when we entered
  /// this expression evaluation context.
  unsigned NumCleanupObjects;
  /// \brief The number of typos encountered during this expression evaluation
  /// context (i.e. the number of TypoExprs created).
  unsigned NumTypos;
  /// Saved copy of Sema::MaybeODRUseExprs from the enclosing context.
  /// NOTE(review): presumably restored when this record is popped --
  /// confirm against the pop logic in Sema.cpp.
  llvm::SmallPtrSet<Expr*, 2> SavedMaybeODRUseExprs;
  /// \brief The lambdas that are present within this context, if it
  /// is indeed an unevaluated context.
  SmallVector<LambdaExpr *, 2> Lambdas;
  /// \brief The declaration that provides context for lambda expressions
  /// and block literals if the normal declaration context does not
  /// suffice, e.g., in a default function argument.
  Decl *ManglingContextDecl;
  /// \brief The context information used to mangle lambda expressions
  /// and block literals within this context.
  ///
  /// This mangling information is allocated lazily, since most contexts
  /// do not have lambda expressions or block literals.
  IntrusiveRefCntPtr<MangleNumberingContext> MangleNumbering;
  /// \brief If we are processing a decltype type, a set of call expressions
  /// for which we have deferred checking the completeness of the return type.
  SmallVector<CallExpr *, 8> DelayedDecltypeCalls;
  /// \brief If we are processing a decltype type, a set of temporary binding
  /// expressions for which we have deferred checking the destructor.
  SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;

  /// Construct a record for a newly entered context. NumTypos starts at
  /// zero and MangleNumbering is left null (it is allocated lazily via
  /// getMangleNumberingContext).
  ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
                                    unsigned NumCleanupObjects,
                                    bool ParentNeedsCleanups,
                                    Decl *ManglingContextDecl,
                                    bool IsDecltype)
    : Context(Context), ParentNeedsCleanups(ParentNeedsCleanups),
      IsDecltype(IsDecltype), NumCleanupObjects(NumCleanupObjects),
      NumTypos(0),
      ManglingContextDecl(ManglingContextDecl), MangleNumbering() { }

  /// \brief Retrieve the mangling numbering context, used to consistently
  /// number constructs like lambdas for mangling.
  MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx);

  /// True if no code is generated for expressions in this context: either
  /// a plain unevaluated operand or the abstract-field-reference variant.
  bool isUnevaluated() const {
    return Context == Unevaluated || Context == UnevaluatedAbstract;
  }
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// \brief Compute the mangling number context for a lambda expression or
/// block literal.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
/// \param[out] ManglingContextDecl - Returns the ManglingContextDecl
/// associated with the context, if relevant.
MangleNumberingContext *getCurrentMangleNumberContext(
const DeclContext *DC,
Decl *&ManglingContextDecl);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult : public llvm::FastFoldingSetNode {
public:
  /// Outcome of overload resolution for the special member.
  enum Kind {
    NoMemberOrDeleted,
    Ambiguous,
    Success
  };

private:
  /// The selected method (may be null) packed with a 2-bit Kind.
  llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;

public:
  /// Construct from the folding-set ID used to unique results in
  /// Sema::SpecialMemberCache.
  SpecialMemberOverloadResult(const llvm::FoldingSetNodeID &ID)
    : FastFoldingSetNode(ID)
  {}

  CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
  void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }

  Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
  void setKind(Kind K) { Pair.setInt(K); }
};
/// \brief A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResult> SpecialMemberCache;
/// \brief The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;
/// Bump-pointer arena owned by this Sema; allocations from it are released
/// wholesale when the Sema is destroyed.
llvm::BumpPtrAllocator BumpAlloc;
/// \brief The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// \brief A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the locations of the beginning of unparsed default
// argument locations.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedInternals - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::DenseMap<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
/// A selector's two method lists (NOTE(review): presumably instance vs.
/// factory methods — confirm against ObjCMethodList's users).
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
CXXInvalid
};
/// Key identifying one special member of one class.
typedef std::pair<CXXRecordDecl*, CXXSpecialMember> SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
/// NOTE(review): presumably pulls the methods for \p Sel into MethodPool
/// (e.g. from an external AST source) — confirm against the definition.
void ReadMethodPool(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// \brief Cause the active diagnostic on the DiagosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the FP_CONTRACT state on entry/exit of compound
/// statements.
class FPContractStateRAII {
public:
// Capture the current fp_contract setting; the destructor restores it.
FPContractStateRAII(Sema& S)
: S(S), OldFPContractState(S.FPFeatures.fp_contract) {}
~FPContractStateRAII() {
S.FPFeatures.fp_contract = OldFPContractState;
}
private:
Sema& S;
bool OldFPContractState : 1;
};
/// Create an implicitly-declared typedef named \p Name for the type \p T.
void addImplicitTypedef(StringRef Name, QualType T);
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// \brief Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
// Accessors for the objects Sema was constructed with / owns.
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getFPOptions() { return FPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
///\brief Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
///\param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
/// Print Sema statistics (for debugging/instrumentation).
void PrintStats() const;
/// \brief Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
Sema &SemaRef;
unsigned DiagID;
public:
SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }
// Emission happens in the destructor so that all '<<' arguments have been
// accumulated by the time the diagnostic is finally issued.
~SemaDiagnosticBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First flush the underlying
// DiagnosticBuilder data, and clear the diagnostic builder itself so it
// won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
FlushCounts();
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
template<typename T>
friend const SemaDiagnosticBuilder &operator<<(
const SemaDiagnosticBuilder &Diag, const T &Value) {
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
};
/// \brief Emit a diagnostic with ID \p DiagID at \p Loc, wrapped in a
/// SemaDiagnosticBuilder so any template-instantiation context is printed
/// alongside it.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
  // Start the underlying diagnostic, then hand it to the Sema-aware builder,
  // which takes over responsibility for actually emitting it.
  DiagnosticBuilder Builder = Diags.Report(Loc, DiagID);
  return SemaDiagnosticBuilder(Builder, *this, DiagID);
}
/// \brief Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);
/// \brief Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
/// NOTE(review): appears to test whether \p loc came from a macro named
/// \p name, updating \p loc to the spelling site — confirm at definition.
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// \brief Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// \brief Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// \brief Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
void emitAndClearUnusedLocalTypedefWarnings();
void ActOnEndOfTranslationUnit();
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
// Scope-stack management: these push entries onto the FunctionScopes stack
// consumed by getCurFunction()/getEnclosingFunction() below.
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// \brief This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD,
CapturedRegionKind K);
/// Pop the innermost function scope; \p WP, \p D and \p blkExpr parameterize
/// the analysis-based warnings run over the completed body.
void
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
const BlockExpr *blkExpr = nullptr);
/// Retrieve the innermost function scope (the back of the FunctionScopes
/// stack). Precondition: the stack is non-empty.
sema::FunctionScopeInfo *getCurFunction() const {
return FunctionScopes.back();
}
/// Retrieve the nearest enclosing non-block function scope, walking the
/// FunctionScopes stack from innermost to outermost. Returns null when no
/// such scope exists (including when the stack is empty).
sema::FunctionScopeInfo *getEnclosingFunction() const {
  for (auto It = FunctionScopes.rbegin(), End = FunctionScopes.rend();
       It != End; ++It) {
    // Skip block scopes; we want the enclosing *function*.
    if (!isa<sema::BlockScopeInfo>(*It))
      return *It;
  }
  return nullptr;
}
/// Record a use of a weak entity in the current function scope, but only
/// when we are in an evaluated context; \p IsRead distinguishes reads from
/// other uses.
template <typename ExprT>
void recordUseOfEvaluatedWeak(const ExprT *E, bool IsRead=true) {
  // Unevaluated operands (sizeof, decltype, ...) never touch the weak
  // object at runtime, so there is nothing to record.
  if (isUnevaluatedContext())
    return;
  getCurFunction()->recordUseOfWeak(E, IsRead);
}
void PushCompoundScope();
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// \brief Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// \brief Retrieve the current lambda scope info, if any.
sema::LambdaScopeInfo *getCurLambda();
/// \brief Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// \brief Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
// Type-construction helpers: each builds a semantic type from its parts,
// checking the relevant language rules (see BuildFunctionType's doc below
// for the general contract).
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// Deduce whether the Objective-C 'weak' property attribute is implied by
/// the property's type \p T; returns ObjCDeclSpec::DQ_PR_weak or 0.
unsigned deduceWeakPropertyFromType(QualType T) {
  // Under Objective-C GC, a __weak-qualified type implies 'weak'.
  if (getLangOpts().getGC() != LangOptions::NonGC && T.isObjCGCWeak())
    return ObjCDeclSpec::DQ_PR_weak;
  // Under ARC, a type with __weak ownership implies it as well.
  if (getLangOpts().ObjCAutoRefCount &&
      T.getObjCLifetime() == Qualifiers::OCL_Weak)
    return ObjCDeclSpec::DQ_PR_weak;
  // Otherwise no attribute is deduced.
  return 0;
}
/// \brief Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
/// Convert a parsed declarator into a type with full source information.
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
TypeSourceInfo *GetTypeSourceInfoForDeclarator(Declarator &D, QualType T,
TypeSourceInfo *ReturnTypeInfo);
/// \brief Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
/// Determine whether evaluating the expression \p E can throw.
CanThrowResult canThrow(const Expr *E);
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
// Exception-specification checking (C++ [except.spec]).
bool CheckSpecifiedExceptionType(QualType &T, const SourceRange &Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc,
bool *MissingExceptionSpecification = nullptr,
bool *MissingEmptyExceptionSpecification = nullptr,
bool AllowNoexceptAllMatchWithNoSpec = false,
bool IsOperatorNew = false);
bool CheckExceptionSpecSubset(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Superset, SourceLocation SuperLoc,
const FunctionProtoType *Subset, SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic & NoteID,
const FunctionProtoType *Target, SourceLocation TargetLoc,
const FunctionProtoType *Source, SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// \brief The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// \brief Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
/// When true, diagnose() implementations are expected to emit nothing
/// (see BoundTypeDiagnoser::diagnose).
bool Suppressed;
TypeDiagnoser(bool Suppressed = false) : Suppressed(Suppressed) { }
virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
virtual ~TypeDiagnoser() {}
};
// getPrintable - overload set converting argument values into forms that can
// be streamed into a SemaDiagnosticBuilder; used by BoundTypeDiagnoser::emit
// below. Pointer/locations are mapped to their diagnostic-friendly
// equivalents (e.g. an Expr or TypeLoc becomes its SourceRange).
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
/// TypeDiagnoser that binds a diagnostic ID plus a pack of extra arguments;
/// diagnose() emits the diagnostic with those arguments followed by the
/// offending type. NOTE: Args holds *references* to the constructor
/// arguments, so a BoundTypeDiagnoser must not outlive them.
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
unsigned DiagID;
std::tuple<const Ts &...> Args;
template <std::size_t... Is>
void emit(const SemaDiagnosticBuilder &DB,
llvm::index_sequence<Is...>) const {
// Apply all tuple elements to the builder in order.
bool Dummy[] = {(DB << getPrintable(std::get<Is>(Args)))...};
(void)Dummy;
}
public:
// A DiagID of 0 marks the diagnoser as suppressed: diagnose() is a no-op.
BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
: TypeDiagnoser(DiagID == 0), DiagID(DiagID), Args(Args...) {}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
if (Suppressed)
return;
const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
emit(DB, llvm::index_sequence_for<Ts...>());
DB << T;
}
};
private:
// NOTE(review): presumably the shared implementation behind the public
// RequireCompleteType overloads — confirm at the definition.
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
// Module visibility state: the currently visible module set plus a stack of
// saved sets for nested submodules.
VisibleModuleSet VisibleModules;
llvm::SmallVector<VisibleModuleSet, 16> VisibleModulesStack;
Module *CachedFakeTopLevelModule;
public:
/// \brief Get the module owning an entity.
Module *getOwningModule(Decl *Entity);
/// \brief Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND, SourceLocation Loc);
/// Whether module \p M is in the current set of visible modules.
bool isModuleVisible(Module *M) { return VisibleModules.isVisible(M); }
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
  // Fast path: most declarations are not hidden at all. Only hidden ones
  // need the slower module-visibility check.
  if (!D->isHidden())
    return true;
  return isVisibleSlow(D);
}
bool hasVisibleMergedDefinition(NamedDecl *Def);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
unsigned DiagID);
/// Convenience overload: binds \p Args to \p DiagID in a BoundTypeDiagnoser
/// and forwards to the TypeDiagnoser form.
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, Diagnoser);
}
bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
/// Convenience overload mirroring the RequireCompleteType one above.
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, Diagnoser);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
/// Convenience overload mirroring the RequireCompleteType one above.
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireLiteralType(Loc, T, Diagnoser);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
/// List of decls defined in a function prototype. This contains EnumConstants
/// that incorrectly end up in translation unit scope because there is no
/// function to pin them on. ActOnFunctionDeclarator reads this list and patches
/// them into the FunctionDecl.
std::vector<NamedDecl*> DeclsInPrototypeScope;
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
/// NOTE(review): presumably resolves \p II as a type name in scope \p S,
/// returning a null ParsedType when it does not name a type — confirm.
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false,
bool HasTrailingDot = false,
ParsedType ObjectType = ParsedType(),
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool AllowClassTemplates = false);
/// \brief For compatibility with MSVC, we delay parsing of some default
/// template type arguments until instantiation time. Emits a warning and
/// returns a synthesized DependentNameType that isn't really dependent on any
/// other template arguments.
ParsedType ActOnDelayedDefaultTemplateArg(const IdentifierInfo &II,
SourceLocation NameLoc);
/// \brief Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
NC_Unknown,
NC_Error,
NC_Keyword,
NC_Type,
NC_Expression,
NC_NestedNameSpecifier,
NC_TypeTemplate,
NC_VarTemplate,
NC_FunctionTemplate
};
/// Result of ClassifyName(): a tagged value whose active member depends on
/// the classification kind.
class NameClassification {
NameClassificationKind Kind;
// Exactly one of the following is meaningful, selected by Kind (the
// getters below assert this).
ExprResult Expr;
TemplateName Template;
ParsedType Type;
const IdentifierInfo *Keyword;
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {}
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
NameClassification(const IdentifierInfo *Keyword)
: Kind(NC_Keyword), Keyword(Keyword) { }
// Named factories for the kinds that carry no payload (or a TemplateName).
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification NestedNameSpecifier() {
return NameClassification(NC_NestedNameSpecifier);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification VarTemplate(TemplateName Name) {
NameClassification Result(NC_VarTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
ExprResult getExpression() const {
assert(Kind == NC_Expression);
return Expr;
}
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
Kind == NC_VarTemplate);
return Template;
}
/// Map the template classification kinds onto TemplateNameKind; only valid
/// for the three NC_*Template kinds.
TemplateNameKind getTemplateNameKind() const {
switch (Kind) {
case NC_TypeTemplate:
return TNK_Type_template;
case NC_FunctionTemplate:
return TNK_Function_template;
case NC_VarTemplate:
return TNK_Var_template;
default:
llvm_unreachable("unsupported name classification.");
}
}
};
/// \brief Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param IsAddressOfOperand True if this name is the operand of a unary
/// address of ('&') expression, assuming it is classified as an
/// expression.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification
ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name,
SourceLocation NameLoc, const Token &NextToken,
bool IsAddressOfOperand,
std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr);
// Declarator processing entry points (SemaDecl.cpp).
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name,
SourceLocation Loc);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation());
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
// Shadowing diagnostics (-Wshadow).
void CheckShadow(Scope *S, VarDecl *D, const LookupResult& R);
void CheckShadow(Scope *S, VarDecl *D);
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
// HLSL Change Starts
// This enumeration is used to determine whether a variable declaration
// should shadow a prior declaration rather than merging.
enum ShadowMergeState {
ShadowMergeState_Disallowed, // shadowing is not allowed
ShadowMergeState_Possible, // shadowing is possible (but may not occur)
ShadowMergeState_Effective // the declaration should shadow a prior one
};
// HLSL Change Ends
// HLSL Change Ends
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ShadowMergeState MergeState = ShadowMergeState_Disallowed); // HLSL Change - add merge state
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous, ShadowMergeState MergeState = ShadowMergeState_Disallowed); // HLSL Change - add merge state
void CheckVariableDeclarationType(VarDecl *NewVD);
void CheckCompleteVariableDeclaration(VarDecl *var);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
bool CheckConstexprFunctionDecl(const FunctionDecl *FD);
bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsExplicitSpecialization);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SCm, hlsl::ParameterModifier ParamMod); // HLSL Change
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param,
SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit,
bool TypeMayContainAuto);
void ActOnUninitializedDecl(Decl *dcl, bool TypeMayContainAuto);
void ActOnInitializerError(Decl *Dcl);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group,
bool TypeMayContainAuto = true);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(FunctionDecl *FD,
const FunctionDecl *EffectiveDefinition =
nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
/// Whether \p D is a (non-null) Objective-C method declaration.
bool isObjCMethodDecl(Decl *D) {
  // A null declaration is never an Objective-C method; otherwise defer to
  // the dynamic class check.
  if (!D)
    return false;
  return isa<ObjCMethodDecl>(D);
}
/// \brief Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
/// \brief Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineMethodDef(CXXMethodDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// \brief Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ParmVarDecl * const *Begin,
ParmVarDecl * const *End);
/// \brief Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void DiagnoseSizeOfParametersAndReturnValue(ParmVarDecl * const *Begin,
ParmVarDecl * const *End,
QualType ReturnTy,
NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// \brief Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S,
AttributeList *AttrList,
SourceLocation SemiLoc);
/// \brief The parser has processed a module import declaration.
///
/// \param AtLoc The location of the '@' symbol, if any.
///
/// \param ImportLoc The location of the 'import' keyword.
///
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation AtLoc, SourceLocation ImportLoc,
ModuleIdPath Path);
/// \brief The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// \brief The parsed has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// \brief The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// \brief Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
Declaration,
Definition,
DefaultArgument
};
/// \brief Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
bool NeedDefinition, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
/// \brief Retrieve a suitable printing policy.
PrintingPolicy getPrintingPolicy() const {
// Delegate to the static overload using Sema's own context/preprocessor.
return getPrintingPolicy(Context, PP);
}
/// \brief Retrieve a suitable printing policy.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
/// Handle a declaration-specifier sequence with no declarator
/// (e.g. a free-standing 'struct S;').
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
DeclSpec &DS);
/// Overload that also carries template parameter lists, for free-standing
/// declarations appearing inside template constructs.
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation = false);
/// Build the declaration for an anonymous struct or union member.
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
/// Build a Microsoft C anonymous struct member (MS extension).
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
/// Determine whether a tag declared with kind \p NewTag is an acceptable
/// redeclaration of \p Previous (e.g. 'struct' vs 'class' mismatches).
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
/// Describes how a tag (struct/union/class/enum) name is being used at the
/// point where it appears.
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
/// Result of checking whether a definition's body can be skipped: a flag
/// saying whether to skip, and the previously-seen declaration (if any)
/// that makes skipping possible.
struct SkipBodyInfo {
SkipBodyInfo() = default;
bool ShouldSkip = false;
NamedDecl *Previous = nullptr;
};
/// ActOnTag - Invoked when the parser sees a tag name; \p TUK indicates
/// whether it is a reference, declaration, definition, or friend.
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS,
IdentifierInfo *Name, SourceLocation NameLoc,
AttributeList *Attr, AccessSpecifier AS,
SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists,
bool &OwnedDecl, bool &IsDependent,
SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, SkipBodyInfo *SkipBody = nullptr);
/// Handle a friend tag declaration that carries template parameter lists.
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS,
IdentifierInfo *Name, SourceLocation NameLoc,
AttributeList *Attr,
MultiTemplateParamsArg TempParamLists);
/// Handle a tag whose name/scope is dependent (inside a template);
/// produces a type rather than a declaration.
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
/// Handle the Objective-C '@defs' construct, collecting the resulting
/// declarations into \p Decls.
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
/// Parse action invoked for each field declarator inside a tag body.
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
/// Analyze a field declarator and build the corresponding FieldDecl.
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
/// Build an MSPropertyDecl for a Microsoft __declspec(property) member.
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
AttributeList *MSPropertyAttr);
/// Check and build a field declaration from already-analyzed pieces;
/// \p PrevDecl is a prior declaration with the same name, if any.
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
/// Check a field whose type is non-trivial (see the definition for the
/// precise rules applied).
bool CheckNontrivialField(FieldDecl *FD);
/// Emit notes explaining why the given special member of \p Record is
/// non-trivial.
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
/// Determine whether the given special member function is trivial.
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
bool Diagnose = false);
/// Determine which C++ special member function \p MD is, if any.
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD);
/// Objective-C: handle the last parsed bit-field among \p AllIvarDecls
/// (see the definition for the exact adjustment performed).
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
/// Parse action for an Objective-C instance variable declarator.
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope* S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields,
SourceLocation LBrac, SourceLocation RBrac,
AttributeList *AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
typedef void *SkippedDefinitionContext;
/// \brief Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
/// Invoked when the definition of an Objective-C container (interface,
/// protocol, etc.) begins; returns the container declaration.
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceLocation RBraceLoc);
/// Invoked when we leave a skipped tag definition; \p Context is the value
/// returned by the matching ActOnTagStartSkippedDefinition call.
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
/// Invoked when an Objective-C container definition completes.
void ActOnObjCContainerFinishDefinition();
/// \brief Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
/// Check a single enumerator (\p Id with optional value expression \p val)
/// against its enumeration and the preceding enumerator, if any.
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
/// Check that the type named as an enum-base is a valid underlying type.
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
/// Check that a redeclaration of an enum agrees with the previous
/// declaration \p Prev in scopedness and underlying type.
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
/// Parse action for a single enumerator.
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
AttributeList *Attrs,
SourceLocation EqualLoc, Expr *Val);
/// Parse action invoked after the closing brace of an enum body, with all
/// enumerators collected in \p Elements.
void ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc,
SourceLocation RBraceLoc, Decl *EnumDecl,
ArrayRef<Decl *> Elements,
Scope *S, AttributeList *Attr);
/// Determine the declaration context that encloses \p DC for the purposes
/// of semantic analysis (see the definition for the exact rules).
DeclContext *getContainingDC(DeclContext *DC);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
/// Restore the previously-current declaration context.
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
/// Pop the function context entered via ActOnReenterFunctionContext.
void ActOnExitFunctionContext();
/// Return the innermost function-level declaration context, per the
/// getCur* accessors below.
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// \brief Make the given externally-produced declaration visible at the
/// top level scope.
///
/// \param D The externally-produced declaration to push.
///
/// \param Name The name of the externally-produced declaration.
void pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
/// Determine whether a typedef redeclaration \p New is incompatible with a
/// prior type declaration \p Old.
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Attribute merging methods. Each returns the attribute added to \p D, or
/// null when no new attribute was added.
/// NOTE(review): the comment previously said "Return true", but the
/// signatures return attribute pointers — confirm exact contract against
/// the definitions.
AvailabilityAttr *mergeAvailabilityAttr(NamedDecl *D, SourceRange Range,
IdentifierInfo *Platform,
VersionTuple Introduced,
VersionTuple Deprecated,
VersionTuple Obsoleted,
bool IsUnavailable,
StringRef Message,
bool Override,
unsigned AttrSpellingListIndex);
TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range,
TypeVisibilityAttr::VisibilityType Vis,
unsigned AttrSpellingListIndex);
VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range,
VisibilityAttr::VisibilityType Vis,
unsigned AttrSpellingListIndex);
DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
MSInheritanceAttr *
mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase,
unsigned AttrSpellingListIndex,
MSInheritanceAttr::Spelling SemanticSpelling);
FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range,
IdentifierInfo *Format, int FormatIdx,
int FirstArg, unsigned AttrSpellingListIndex);
SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name,
unsigned AttrSpellingListIndex);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range,
IdentifierInfo *Ident,
unsigned AttrSpellingListIndex);
MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
/// \brief Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
/// \brief Don't merge availability attributes at all.
AMK_None,
/// \brief Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// \brief Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override
};
/// Merge the attributes of \p Old into the redeclaration \p New, following
/// the availability-merge policy \p AMK.
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
/// Merge a typedef-name redeclaration with the prior declarations found by
/// lookup.
void MergeTypedefNameDecl(TypedefNameDecl *New, LookupResult &OldDecls);
/// Merge a function redeclaration \p New with the previous declaration
/// \p Old (which may be updated to the declaration actually merged with).
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
/// Merge an Objective-C method redeclaration with its previous declaration.
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous, ShadowMergeState& MergeState); // HLSL Change - add merge state
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld, ShadowMergeState& MergeState); // HLSL Change - add merge state
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation
enum AssignmentAction {
AA_Assigning,
AA_Passing,
AA_Returning,
AA_Converting,
AA_Initializing,
AA_Sending,
AA_Casting,
AA_Passing_CFAudited
};
/// C++ Overloading.
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
/// Determine whether \p New may be declared as an overload of the previous
/// declarations in \p OldDecls; on a match, \p OldDecl receives the matched
/// declaration.
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
/// Determine whether \p New and \p Old have distinct signatures and can
/// therefore overload each other.
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl);
/// \brief Checks availability of the function depending on the current
/// function context. Inside an unavailable function, unavailability is
/// ignored.
///
/// \returns true if \p FD is unavailable and current context is inside
/// an available function, false otherwise.
bool isFunctionConsideredUnavailable(FunctionDecl *FD);
/// Attempt an implicit conversion from \p From to \p ToType, returning the
/// resulting implicit conversion sequence.
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
bool AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
// Tests for the individual standard conversions ([conv.*] and the various
// pointer conversions). The Is*Conversion predicates that take a
// \c ConvertedType out-parameter report the adjusted destination type.
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
/// Compare the parameter lists of two function prototypes; when they
/// differ and \p ArgPos is non-null, it receives the index of the first
/// mismatched parameter.
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
/// Augment a diagnostic with details of a function type mismatch.
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
/// Check a pointer conversion, computing the cast kind and base path
/// needed to perform it.
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsNoReturnConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
/// Initialize \p Entity from \p Value, preferring a move (and NRVO, when
/// \p AllowNRVO and \p NRVOCandidate permit) over a copy.
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const VarDecl *NRVOCandidate,
QualType ResultType,
Expr *Value,
bool AllowNRVO = true);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
/// Perform copy-initialization of \p Entity from \p Init.
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
/// Convert the implicit object argument \p From for a call to \p Method.
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_NewExpr ///< Constant expression in a noptr-new-declarator.
};
/// Check that \p From is a converted constant expression of type \p T for
/// the context \p CCE, extracting its value.
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE);
/// \brief Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
// Presumably: Suppress disables all diagnostics and SuppressConversion
// disables the conversion-picked diagnostic — confirm at the use sites.
bool Suppress;
bool SuppressConversion;
ContextualImplicitConverter(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) {}
/// \brief Determine whether the specified type is a valid destination type
/// for this conversion.
virtual bool match(QualType T) = 0;
/// \brief Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
/// \brief Emits a diagnostic when the expression has incomplete class type.
virtual SemaDiagnosticBuilder
diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
/// \brief Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
/// \brief Emits a note for the explicit conversion function.
virtual SemaDiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// \brief Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual SemaDiagnosticBuilder
diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
/// \brief Emits a note for one of the candidate conversions.
virtual SemaDiagnosticBuilder
noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// \brief Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual SemaDiagnosticBuilder diagnoseConversion(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
virtual ~ContextualImplicitConverter() {}
};
/// Converter for contexts that require an integral or (possibly scoped)
/// enumeration value; the matching criterion and the no-match diagnostic
/// are specialized accordingly.
class ICEConvertDiagnoser : public ContextualImplicitConverter {
bool AllowScopedEnumerations;
public:
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T) override;
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
return diagnoseNotInt(S, Loc, T);
}
/// \brief Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
enum ObjCSubscriptKind {
OS_Array,
OS_Dictionary,
OS_Error
};
/// Classify an Objective-C subscript index expression as array-style,
/// dictionary-style, or erroneous.
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
LK_Array,
LK_Dictionary,
LK_Numeric,
LK_Boxed,
LK_String,
LK_Block,
LK_None
};
/// Classify \p FromE as one of the Objective-C literal kinds above.
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
/// Convert the base object expression \p From as needed to access the
/// member \p Member.
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallPtrSet<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallPtrSet<CXXRecordDecl *, 16> AssociatedClassSet;
// Overload candidate construction: each AddXXXCandidate routine adds zero
// or more candidates for the given arguments into \c CandidateSet.
/// Add the non-template function \p Function as a candidate.
void AddOverloadCandidate(FunctionDecl *Function,
DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = false);
/// Add every function (template or not) in \p Functions as a candidate.
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
/// Add a method candidate; the implicit object argument is described by
/// \p ObjectType and \p ObjectClassification.
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false);
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
/// Add a candidate deduced from the method template \p MethodTmpl.
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
/// Add a candidate deduced from the function template \p FunctionTemplate.
void AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate,
DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
/// Add a conversion-function candidate for converting \p From to \p ToType.
void AddConversionCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
Expr *From, QualType ToType,
OverloadCandidateSet& CandidateSet,
bool AllowObjCConversionOnExplicit);
void AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet,
bool AllowObjCConversionOnExplicit);
/// Add a surrogate-call candidate: calling \p Object through a conversion
/// to the function type \p Proto.
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
/// Add the member operators of the first argument's class for operator
/// \p Op.
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
SourceRange OpRange = SourceRange());
/// Add a built-in operator candidate with the given signature.
void AddBuiltinCandidate(QualType ResultTy, QualType *ParamTys,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
/// Add all applicable built-in operator candidates for \p Op.
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
/// Add candidates found by argument-dependent lookup of \p Name.
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(FunctionDecl *Fn, QualType DestType = QualType());
// Emit as a series of 'note's all template and non-templates
// identified by the expression Expr
void NoteAllOverloadCandidates(Expr* E, QualType DestType = QualType());
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
/// Resolve the address-of-overloaded-function expression \p AddressOfExpr
/// against \p TargetType, optionally complaining on failure.
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
/// Resolve an overload expression that must denote exactly one function
/// template specialization.
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConverion = false,
bool Complain = false,
const SourceRange& OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
/// Rewrite the overloaded-function reference \p E to refer to the
/// resolved function \p Fn.
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
/// Add candidates for an overloaded call through \p ULE.
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
FRS_Success,
FRS_NoViableFunction,
FRS_DiagnosticIssued
};
// An enum to represent whether something is dealing with a call to begin()
// or a call to end() in a range-based for loop.
enum BeginEndFunction {
BEF_begin,
BEF_end
};
/// Build the call to begin() or end() (per \p BEF) for a range-based for
/// loop over \p Range, storing the resulting call in \p CallExpr.
ForRangeStatus BuildForRangeBeginEndCall(Scope *S, SourceLocation Loc,
SourceLocation RangeLoc,
VarDecl *Decl,
BeginEndFunction BEF,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
/// Build a call whose callee is an overloaded (unresolved) function,
/// performing overload resolution.
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true);
/// Populate \p CandidateSet for a call through \p ULE, storing the result
/// expression (if any) in \p Result.
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
/// Build a unary operation whose operator is overloaded.
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
unsigned Opc,
const UnresolvedSetImpl &Fns,
Expr *input);
/// Build a binary operation whose operator is overloaded.
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
unsigned Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS);
/// Build a subscript expression whose operator[] is overloaded.
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base,Expr *Idx);
/// Build a call to a member function denoted by \p MemExpr.
ExprResult
BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
/// Build a call to an object of class type (operator() or surrogate).
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
/// Build a use of an overloaded operator-> on \p Base.
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
/// Check the parameters in the range [\p Param, \p ParamEnd) for a function
/// definition, optionally requiring that each parameter is named.
bool CheckParmsForFunctionDef(ParmVarDecl *const *Param,
ParmVarDecl *const *ParamEnd,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
/// Find the innermost enclosing scope that is not a class-member scope.
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria is specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
/// @brief Describes the kind of name lookup to perform.
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
/// Look up of an operator name (e.g., operator+) for use with
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Look up of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up a friend of a local class. This lookup does not look
/// outside the innermost non-class scope. See C++11 [class.friend]p11.
LookupLocalFriendName,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up implicit 'self' parameter of an objective-c method.
LookupObjCImplicitSelfParam,
/// \brief Look up any declaration with any name.
LookupAnyName
};
/// \brief Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
/// \brief The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// \brief The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists.
ForRedeclaration
};
/// \brief The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
/// \brief The lookup resulted in an error.
LOLR_Error,
/// \brief The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// \brief The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// \brief The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template,
/// \brief The lookup found an overload set of literal operator templates,
/// which expect the character type and characters of the spelling of the
/// string literal token to be passed as template arguments.
LOLR_StringTemplate
};
/// Look up the special member function of kind \p SM for class \p D with
/// the given constraints on the argument and the implicit object.
SpecialMemberOverloadResult *LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
/// Callback invoked to emit the diagnostic for a delayed typo correction.
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
/// Callback used to build a recovery expression from a corrected typo.
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
/// Perform unqualified C++ name lookup starting at scope \p S.
bool CppLookupName(LookupResult &R, Scope *S);
/// State attached to each delayed TypoExpr: the correction consumer plus
/// the diagnostic and recovery callbacks to run when it is resolved.
struct TypoExprState {
std::unique_ptr<TypoCorrectionConsumer> Consumer;
TypoDiagnosticGenerator DiagHandler;
TypoRecoveryCallback RecoveryHandler;
TypoExprState();
TypoExprState(TypoExprState&& other) LLVM_NOEXCEPT;
TypoExprState& operator=(TypoExprState&& other) LLVM_NOEXCEPT;
};
/// \brief The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// \brief Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC);
// \brief The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// \brief Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// \brief Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
std::unique_ptr<CorrectionCandidateCallback> CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
/// \brief Retrieve the delayed-typo state associated with \p TE.
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// \brief Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// \brief Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloaded.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
/// \brief Perform unqualified name lookup starting in scope \p S.
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
/// \brief Perform qualified name lookup into the given declaration context.
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
/// \brief Perform qualified name lookup into \p LookupCtx using the
/// nested-name-specifier \p SS as the qualifier.
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
/// \brief Perform qualified or unqualified lookup for a parsed name,
/// depending on whether a scope specifier \p SS was written.
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
/// \brief Look up an Objective-C protocol declaration by name.
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
/// \brief Look up a name within the superclass of \p Class.
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
/// \brief Collect the visible overloads of operator \p Op for operand
/// types \p T1 / \p T2 into \p Functions.
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
QualType T1, QualType T2,
UnresolvedSetImpl &Functions);
void addOverloadedOperatorToUnresolvedSet(UnresolvedSetImpl &Functions,
DeclAccessPair Operator,
QualType T1, QualType T2);
/// \brief Find the label \p II in the current function, creating it if it
/// does not already exist (at \p GnuLabelLoc for GNU local labels).
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
// Lookup of special member functions of \p Class; Quals/ThisQuals carry
// the cv-qualifiers of the argument and the implicit object parameter.
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
/// \brief Check the well-formedness of a literal-operator-id.
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
ArrayRef<QualType> ArgTys,
bool AllowRaw,
bool AllowTemplate,
bool AllowStringTemplate);
bool isKnownName(StringRef name);
/// \brief Perform argument-dependent lookup of \p Name for call arguments
/// \p Args, accumulating candidate functions in \p Functions.
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
// Feed every declaration visible from the given scope or context to
// \p Consumer (used, e.g., for code completion and typo correction).
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true);
/// \brief The context in which a typo correction is requested.
enum CorrectTypoKind {
CTK_NonError, // CorrectTypo used in a non error recovery situation.
CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
/// \brief Try to correct a typo in the name \p Typo, performing the given
/// kind of lookup; returns an empty TypoCorrection when no plausible
/// replacement is found (failures are recorded unless \p RecordFailure
/// is false).
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
std::unique_ptr<CorrectionCandidateCallback> CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
/// \brief Like CorrectTypo, but defers the correction: returns a TypoExpr
/// placeholder; \p TDG and \p TRC are invoked when the typo is resolved.
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
std::unique_ptr<CorrectionCandidateCallback> CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// \brief Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected and ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult
CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
/// Convenience overload: correct delayed typos in \p E using \p Filter,
/// with no initializer declaration to avoid.
ExprResult
CorrectDelayedTyposInExpr(Expr *E,
llvm::function_ref<ExprResult(Expr *)> Filter) {
VarDecl *NoInitDecl = nullptr;
return CorrectDelayedTyposInExpr(E, NoInitDecl, Filter);
}
/// \brief Correct delayed typos in \p ER (when valid), avoiding \p InitDecl
/// and filtering rebuilt expressions through \p Filter.
///
/// Bug fix: the previous forwarding call dropped \p InitDecl (it invoked the
/// two-argument overload), so the variable whose initializer was being
/// corrected was no longer excluded from candidate corrections. Forward
/// \p InitDecl to the Expr* overload so the documented "avoid this VarDecl"
/// behavior is preserved.
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; }) {
return ER.isInvalid() ? ER
: CorrectDelayedTyposInExpr(ER.get(), InitDecl, Filter);
}
/// Convenience overload: correct delayed typos in \p ER using \p Filter,
/// with no initializer declaration to avoid.
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER,
llvm::function_ref<ExprResult(Expr *)> Filter) {
VarDecl *const NoInitDecl = nullptr;
return CorrectDelayedTyposInExpr(ER, NoInitDecl, Filter);
}
/// \brief Emit the diagnostic (and any fix-it) for the typo correction
/// \p Correction.
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
/// \brief Variant of diagnoseTypo that also emits \p PrevNote, typically
/// pointing at the declaration the correction refers to.
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
/// \brief Compute the namespaces and classes associated with the argument
/// expressions \p Args for argument-dependent lookup.
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
/// \brief Produce a diagnostic describing the ambiguity in \p Result.
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
/// \brief Lazily create the declaration of the builtin function \p II with
/// builtin ID \p ID.
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
/// \brief Implicitly declare the function \p II called at \p Loc.
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
void ProcessDeclAttributeList(Scope *S, Decl *D, const AttributeList *AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const AttributeList *AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Determine if type T is a valid subject for a nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const AttributeList &attr, unsigned &value);
bool CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckNoReturnAttr(const AttributeList &attr);
/// \brief Check that attribute argument \p ArgNum is a string literal,
/// extracting its value into \p Str (and its location into \p ArgLocation
/// when non-null).
bool checkStringLiteralArgumentAttr(const AttributeList &Attr,
unsigned ArgNum, StringRef &Str,
SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
void checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceAttr::Spelling SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType &T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Check whether a nullability type specifier can be added to the given
/// type.
///
/// \param type The type to which the nullability specifier will be
/// added. On success, this type will be updated appropriately.
///
/// \param nullability The nullability specifier to add.
///
/// \param nullabilityLoc The location of the nullability specifier.
///
/// \param isContextSensitive Whether this nullability specifier was
/// written as a context-sensitive keyword (in an Objective-C
/// method) or an Objective-C property attribute, rather than as an
/// underscored type specifier.
///
/// \returns true if nullability cannot be applied, false otherwise.
bool checkNullabilityTypeSpecifier(QualType &type, NullabilityKind nullability,
SourceLocation nullabilityLoc,
bool isContextSensitive);
/// \brief Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt, AttributeList *Attrs,
SourceRange Range);
// Objective-C method/property checking (SemaDeclObjC.cpp,
// SemaObjCProperty.cpp).
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if method
/// implementation declaration matches exactly that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
/// A small set of selectors, used for duplicate/coverage bookkeeping below.
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
/// Maps a selector to the method declaring it.
typedef llvm::DenseMap<Selector, ObjCMethodDecl*> ProtocolsMethodsMap;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties (Scope *S, ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl *IDecl);
void DefaultSynthesizeProperties(Scope *S, Decl *D);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if ivar which
/// backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If method is a property setter/getter and
/// the property has a backing ivar, returns this ivar; otherwise, returns NULL.
/// It also returns ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
Selector SetterSel,
const bool isAssign,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
bool *isOverridingProperty,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjcPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
Selector SetterSel,
const bool isAssign,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when atomic property has one but not the other user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
/// \brief How strictly two method declarations are compared for a match.
enum MethodMatchStrategy {
MMS_loose,
MMS_strict
};
/// MatchTwoMethodDeclarations - Checks if two methods' type match and returns
/// true, or false, accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in an interface
/// or protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
/// category matches with those implemented in its primary class and
/// warns each time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// \brief Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// \brief - Returns instance or factory methods in global method pool for
/// given selector. If no such method or only one method found, function returns
/// false; otherwise, it returns true
bool CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool instance);
bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R,
bool receiverIdOrClass);
void DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// \brief - Returns the method which best matches the given argument list,
/// or nullptr if none could be found
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance);
/// \brief Note that correcting \p Typo at \p TypoLoc failed, optionally
/// recording the failure, and return an empty (failed) correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
bool RecordFailure = true) {
if (RecordFailure) {
auto &FailedLocs = TypoCorrectionFailures[Typo];
FailedLocs.insert(TypoLoc);
}
return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance=*/true);
}
/// AddFactoryMethodToGlobalPool - Add a factory (class) method to the
/// translation unit's global method pool; see
/// AddInstanceMethodToGlobalPool.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance=*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory, to the
/// global pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the instance method for the
/// given selector and warns if there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
const bool WantInstance = true;
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, WantInstance);
}
/// LookupFactoryMethodInGlobalPool - Returns the factory method for the
/// given selector and warns if there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
const bool WantInstance = false;
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, WantInstance);
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns a method for the selector
/// which has an implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
/// \brief Wrapper carrying an expression that has been processed as a
/// "full expression". Only Sema can construct one holding a non-null
/// expression (via the private constructor below).
class FullExprArg {
public:
FullExprArg(Sema &S) : E(nullptr) { }
/// Relinquish the wrapped expression as an ExprResult.
ExprResult release() { return E; }
Expr *get() const { return E; }
Expr *operator->() { return E; }
private:
// FIXME: No need to make the entire Sema class a friend when it's just
// Sema::MakeFullExpr that needs access to the constructor below.
friend class Sema;
explicit FullExprArg(Expr *Ex) : E(Ex) {}
Expr *E;
};
/// Wrap \p Arg as a full expression, anchored at the expression's own
/// location (or an invalid location when \p Arg is null).
FullExprArg MakeFullExpr(Expr *Arg) {
SourceLocation AnchorLoc = Arg ? Arg->getExprLoc() : SourceLocation();
return MakeFullExpr(Arg, AnchorLoc);
}
/// Wrap \p Arg as a full expression anchored at \p CC.
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
ExprResult Finished = ActOnFinishFullExpr(Arg, CC);
return FullExprArg(Finished.get());
}
/// Wrap \p Arg as a full expression whose value is discarded.
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
SourceLocation AnchorLoc = Arg ? Arg->getExprLoc() : SourceLocation();
ExprResult Finished =
ActOnFinishFullExpr(Arg, AnchorLoc, /*DiscardedValue*/ true);
return FullExprArg(Finished.get());
}
// Parser callbacks for building statements.
StmtResult ActOnExprStmt(ExprResult Arg);
StmtResult ActOnExprStmtError();
StmtResult ActOnHlslDiscardStmt(SourceLocation Loc); // HLSL Change
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
// Bracket the parsing of a compound statement's contents.
void ActOnStartOfCompoundStmt();
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// \brief A RAII object to enter scope of a compound statement.
class CompoundScopeRAII {
public:
CompoundScopeRAII(Sema &S): S(S) {
S.ActOnStartOfCompoundStmt();
}
~CompoundScopeRAII() {
S.ActOnFinishOfCompoundStmt();
}
private:
Sema &S;
};
/// An RAII helper that pops a function scope on exit, unless disabled.
struct FunctionScopeRAII {
Sema &S;
bool Active;
FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
~FunctionScopeRAII() {
if (!Active)
return;
S.PopFunctionScopeInfo();
}
/// Prevent the destructor from popping the function scope.
void disable() { Active = false; }
};
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
// switch/case labels and ordinary labels.
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, Expr *LHSVal,
SourceLocation DotDotDotLoc, Expr *RHSVal,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
ArrayRef<const Attr*> Attrs,
Stmt *SubStmt);
// Selection and iteration statements.
StmtResult ActOnIfStmt(SourceLocation IfLoc,
FullExprArg CondVal, Decl *CondVar,
Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
Expr *Cond,
Decl *CondVar);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc,
FullExprArg Cond,
Decl *CondVar, Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc,
SourceLocation CondLParen, Expr *Cond,
SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First, FullExprArg Second,
Decl *SecondVar,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
// Objective-C fast enumeration (for..in).
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
enum BuildForRangeKind {
/// Initial building of a for-range statement.
BFRK_Build,
/// Instantiation or recovery rebuild of a for-range statement. Don't
/// attempt any typo-correction.
BFRK_Rebuild,
/// Determining whether a for-range statement could be built. Avoid any
/// unnecessary or irreversible actions.
BFRK_Check
};
StmtResult ActOnCXXForRangeStmt(SourceLocation ForLoc, Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *BeginEndDecl,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
// Jump statements.
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
// Captured regions (e.g. bodies outlined into a CapturedStmt).
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
// Return statements and copy-elision (NRVO) candidate selection.
VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
bool AllowFunctionParameters);
bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
bool AllowFunctionParameters);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
// GCC-style and Microsoft-style inline assembly.
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
SourceLocation RParenLoc);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
llvm::InlineAsmIdentifierInfo &Info,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
// Objective-C \@try / \@catch / \@throw / \@synchronized statements.
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
// C++ try/catch and Structured Exception Handling (__try/__except/__finally).
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
// Unused-entity and suspicious-statement diagnostics.
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// \brief If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
/// statement as a \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
/// \brief Begin collecting delayed diagnostics for a declaration being
/// parsed, into \p pool.
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
/// \brief Enter an "undelayed" diagnostic context while parsing a class.
ParsingClassState PushParsingClass() {
return DelayedDiagnostics.pushUndelayed();
}
/// \brief Restore the delayed-diagnostic state saved by PushParsingClass.
void PopParsingClass(ParsingClassState state) {
DelayedDiagnostics.popUndelayed(state);
}
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
/// \brief The kinds of availability diagnostics that can be emitted.
enum AvailabilityDiagnostic { AD_Deprecation, AD_Unavailable, AD_Partial };
/// \brief Emit an availability diagnostic of kind \p AD for \p D at \p Loc.
void EmitAvailabilityWarning(AvailabilityDiagnostic AD,
NamedDecl *D, StringRef Message,
SourceLocation Loc,
const ObjCInterfaceDecl *UnknownObjCClass,
const ObjCPropertyDecl *ObjCProperty,
bool ObjCPropertyAccess);
bool makeUnavailableInSystemHeader(SourceLocation loc,
StringRef message);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D);
/// \brief Diagnose the use of \p D at \p Loc (deprecation, unavailability,
/// deleted functions, etc.); returns true if the use is an error.
bool DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc,
const ObjCInterfaceDecl *UnknownObjCClass=nullptr,
bool ObjCPropertyAccess=false);
void NoteDeletedFunction(FunctionDecl *FD);
std::string getDeletedOrUnavailableSuffix(const FunctionDecl *FD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
// Maintain the stack of expression evaluation contexts (potentially
// evaluated, unevaluated, constant-evaluated, ...).
void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
bool IsDecltype = false);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext,
ReuseLambdaContextDecl_t,
bool IsDecltype = false);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool OdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool OdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E);
void MarkMemberReferenced(MemberExpr *E);
void UpdateMarkingForLValueToRValue(Expr *E);
void CleanupVarDeclMarking();
/// \brief How a variable capture was requested.
enum TryCaptureKind {
TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// \brief Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely check whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
/// \brief Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
/// \brief Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// \brief Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
// Mark declarations referenced from within a type or an expression.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// \brief Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// \brief Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// \brief Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
std::unique_ptr<CorrectionCandidateCallback> CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty,
ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
ExprResult
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult BuildQualifiedDeclarationNameExpr(
CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentType IT);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
const SourceRange &ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
/// \brief Extra state that lets BuildMemberReferenceExpr re-invoke
/// ActOnMemberAccess after rewriting '.' to '->' (see the comment above).
struct ActOnMemberAccessExtraArgs {
Scope *S; // Scope in which the member access was written.
UnqualifiedId &Id; // The member name as originally spelled.
Decl *ObjCImpDecl; // Enclosing Objective-C @implementation, if any — TODO confirm.
};
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false);
ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
SourceLocation LParenLoc,
ArrayRef<Expr *> Arg,
SourceLocation RParenLoc,
Expr *Config = nullptr,
bool IsExecConfig = false);
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// \brief Build an altivec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation Loc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of a the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc); // "({..})"
void ActOnStmtExprError();
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
/// \brief One component of a __builtin_offsetof designator path: either a
/// field designator (.ident) or an array subscript ([expr]).
struct OffsetOfComponent {
SourceLocation LocStart, LocEnd; // Source range of this component.
bool isBrackets; // true if [expr], false if .ident
union {
IdentifierInfo *IdentInfo; // Valid when !isBrackets: the field name.
Expr *E; // Valid when isBrackets: the index expression.
} U;
};
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
OffsetOfComponent *CompPtr,
unsigned NumComponents,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
OffsetOfComponent *CompPtr,
unsigned NumComponents,
SourceLocation RParenLoc);
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// \brief Describes the result of an "if-exists" condition check.
enum IfExistsResult {
/// \brief The symbol exists.
IER_Exists,
/// \brief The symbol does not exist.
IER_DoesNotExist,
/// \brief The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// \brief An error occurred.
IER_Error
};
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
// HLSL Change Starts
//===---------------------------- HLSL Features -------------------------===//
/// cbuffer/tbuffer
/// \brief cbuffer/tbuffer declarations currently open during parsing;
/// pushed by ActOnStartHLSLBuffer and presumably popped/finalized by
/// ActOnFinishHLSLBuffer — TODO confirm against the implementations.
llvm::SmallVector<Decl*, 1> HLSLBuffers;
/// \brief Called when a cbuffer (\p cbuffer true) or tbuffer declaration
/// begins; \p BufferAttributes carries HLSL register/packoffset-style
/// annotations (exact set — TODO confirm).
Decl* ActOnStartHLSLBuffer(Scope* bufferScope, bool cbuffer, SourceLocation KwLoc,
IdentifierInfo *Ident, SourceLocation IdentLoc,
std::vector<hlsl::UnusualAnnotation *>& BufferAttributes,
SourceLocation LBrace);
/// \brief Called at the closing brace of a cbuffer/tbuffer declaration.
void ActOnFinishHLSLBuffer(Decl *Dcl, SourceLocation RBrace);
/// \brief The innermost cbuffer/tbuffer currently being parsed, if any.
Decl* getActiveHLSLBuffer() const;
/// \brief Begin parsing a ConstantBuffer<T>/TextureBuffer<T> view form.
void ActOnStartHLSLBufferView();
/// \brief Whether a buffer-view form is currently being parsed.
bool IsOnHLSLBufferView();
/// \brief Build the declaration for a ConstantBuffer/TextureBuffer view;
/// \p iscbuf selects constant- vs texture-buffer semantics — TODO confirm.
Decl *ActOnHLSLBufferView(Scope *bufferScope, SourceLocation KwLoc,
DeclGroupPtrTy &dcl, bool iscbuf);
// HLSL Change Ends
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc,
IdentifierInfo *Ident,
SourceLocation LBrace,
AttributeList *AttrList);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
CXXRecordDecl *getStdBadAlloc() const;
/// \brief Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// \brief Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// \brief Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const CXXConstructorDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope,
SourceLocation UsingLoc,
SourceLocation NamespcLoc,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
AttributeList *AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc);
NamedDecl *BuildUsingDeclaration(Scope *S, AccessSpecifier AS,
SourceLocation UsingLoc,
CXXScopeSpec &SS,
DeclarationNameInfo NameInfo,
AttributeList *AttrList,
bool IsInstantiation,
bool HasTypenameKeyword,
SourceLocation TypenameLoc);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
Decl *ActOnUsingDeclaration(Scope *CurScope,
AccessSpecifier AS,
bool HasUsingKeyword,
SourceLocation UsingLoc,
CXXScopeSpec &SS,
UnqualifiedId &Name,
AttributeList *AttrList,
bool HasTypenameKeyword,
SourceLocation TypenameLoc);
Decl *ActOnAliasDeclaration(Scope *CurScope,
AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc,
UnqualifiedId &Name,
AttributeList *AttrList,
TypeResult Type,
Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// \brief Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
// Pointer to allow copying
Sema *Self;
// We order exception specifications thus:
// noexcept is the most restrictive, but is only used in C++11.
// throw() comes next.
// Then a throw(collected exceptions)
// Finally no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
ExceptionSpecificationType ComputedEST;
// Types already folded in — presumably used by CalledDecl/CalledExpr
// (defined elsewhere) to deduplicate Exceptions; TODO confirm.
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
// The collected exception types; consumed by getExceptionSpec() only
// when the computed specification is a dynamic throw(...) list.
SmallVector<QualType, 4> Exceptions;
// Drop all collected exception types and the seen-set together.
void ClearExceptions() {
ExceptionsSeen.clear();
Exceptions.clear();
}
public:
// Start from the most restrictive specification the language mode allows:
// noexcept in C++11, throw() (EST_DynamicNone) otherwise.
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
if (!Self.getLangOpts().CPlusPlus11)
ComputedEST = EST_DynamicNone;
}
/// \brief Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(ComputedEST != EST_ComputedNoexcept &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// \brief The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// \brief The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// \brief Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// \brief Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E);
/// \brief Overwrite an EPI's exception specification with this
/// computed exception specification.
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
FunctionProtoType::ExceptionSpecInfo ESI;
ESI.Type = getExceptionSpecType();
if (ESI.Type == EST_Dynamic) {
ESI.Exceptions = Exceptions;
} else if (ESI.Type == EST_None) {
/// C++11 [except.spec]p14:
/// The exception-specification is noexcept(false) if the set of
/// potential exceptions of the special member function contains "any"
// Rewrite "no specification" as noexcept(false), building the
// 'false' literal through Sema.
ESI.Type = EST_ComputedNoexcept;
ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
tok::kw_false).get();
}
return ESI;
}
};
/// \brief Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(CXXConstructorDecl *CD);
/// \brief Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD);
/// \brief Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// \brief Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// \brief Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
/// \brief Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
bool Diagnose = false);
/// \brief Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// \brief Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// \brief Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXRecordDecl *ClassDecl,
CXXDestructorDecl *Destructor);
/// \brief Declare all inheriting constructors for the given class.
///
/// \param ClassDecl The class declaration into which the inheriting
/// constructors will be added.
void DeclareInheritingConstructors(CXXRecordDecl *ClassDecl);
/// \brief Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// \brief Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// \brief Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// \brief Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// \brief Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// \brief Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// \brief Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// \brief Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// \brief Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// \brief Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// \brief Check whether 'this' shows up in the exception specification of a
/// static member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// \brief Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr*> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorType(const DeclSpec& DS, ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// \brief Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
//// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// \brief Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// \brief When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// \brief RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
Sema &S;
QualType OldCXXThisTypeOverride;
bool Enabled;
public:
/// \brief Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class), along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, unsigned CXXThisTypeQuals,
bool Enabled = true);
// Restores the previous CXXThisTypeOverride on scope exit.
~CXXThisScopeRAII();
};
/// \brief Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param BuildAndDiagnose Presumably, whether to actually build the capture
/// and emit diagnostics rather than just query capturability — TODO confirm.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \return returns 'true' if failed, 'false' if success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr);
/// \brief Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
/// Build a throw expression from an already-analyzed operand.
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
/// Check the operand of a throw expression for validity.
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc);
/// Build a function-style cast / type construction from an
/// already-analyzed type.
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
/// Build a C++ 'new' expression from already-analyzed components.
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Expr *ArraySize,
SourceRange DirectInitRange,
Expr *Initializer,
bool TypeMayContainAuto = true);
/// Check that \p AllocType is a type that may be allocated with 'new'.
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
/// Find the 'operator new' and matching 'operator delete' to be used for a
/// new-expression.
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
bool UseGlobal, QualType AllocType, bool IsArray,
MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete);
/// Resolve an allocation-function overload by name within context \p Ctx.
bool FindAllocationOverload(SourceLocation StartLoc, SourceRange Range,
DeclarationName Name, MultiExprArg Args,
DeclContext *Ctx,
bool AllowMissing, FunctionDecl *&Operator,
bool Diagnose = true);
/// Declare the implicit global allocation/deallocation functions.
void DeclareGlobalNewDelete();
/// Declare a single implicit global allocation function with the given
/// name, return type, and parameter types.
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
QualType Param1,
QualType Param2 = QualType(),
bool addRestrictAttr = false);
/// Find the deallocation function ('operator delete') to use for \p RD.
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
/// Find the usual (non-placement) deallocation function with the given name.
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
DeclarationName Name);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
/// Handle a declaration appearing as a condition (e.g. 'if (int x = f())').
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
/// Check a condition variable, optionally converting its value to bool.
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
bool ConvertToBoolean);
/// Handle a 'noexcept(expr)' expression.
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
/// Build a 'noexcept(expr)' expression from an analyzed operand.
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// \brief Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
/// Build a type-trait expression from already-analyzed type arguments.
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
/// Build an array-type-trait expression from analyzed components.
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
/// Build an expression-trait expression from an analyzed operand.
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
/// Begin analysis of a C++ member access ('.' or '->'), determining whether
/// the member may be a pseudo-destructor.
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
/// Build a pseudo-destructor expression (e.g. 'p->~int()') from analyzed
/// components.
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
/// Handle a parsed pseudo-destructor expression with explicit scope and
/// type names.
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
/// Handle a parsed pseudo-destructor expression whose destroyed type is
/// written as a decl-specifier sequence.
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with a ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
// Statement variant of MaybeCreateExprWithCleanups.
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
// ExprResult-propagating variant of MaybeCreateExprWithCleanups.
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
/// Convenience overload: finish the current full-expression, anchoring it at
/// the expression's own location when one is available.
ExprResult ActOnFinishFullExpr(Expr *Expr) {
  SourceLocation AnchorLoc;
  if (Expr)
    AnchorLoc = Expr->getExprLoc();
  return ActOnFinishFullExpr(Expr, AnchorLoc);
}
/// Finish the current full-expression at location \p CC, performing any
/// required cleanups and checks.
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue = false,
bool IsConstexpr = false,
bool IsLambdaInitCaptureInitializer = false);
/// Finish a full statement.
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
/// Compute the DeclContext named by a type (e.g. for qualified lookup).
DeclContext *computeDeclContext(QualType T);
/// Compute the DeclContext named by a scope specifier.
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
/// Determine whether the scope specifier names a dependent scope.
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
/// If \p NNS names the current instantiation, return the corresponding
/// class; otherwise NULL — see [temp.dep.type].
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// \brief The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// \brief The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
/// Determine whether \p SD can appear in a nested-name-specifier.
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
/// Find the first declaration in scope that could serve as a qualifier for
/// the given nested-name-specifier.
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
/// Determine whether 'II::' would name a non-type entity.
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
SourceLocation IdLoc,
IdentifierInfo &II,
ParsedType ObjectType);
/// Worker for ActOnCXXNestedNameSpecifier: build the nested-name-specifier
/// 'Identifier::', given an optional prior lookup result and error-recovery
/// mode.
bool BuildCXXNestedNameSpecifier(Scope *S,
IdentifierInfo &Identifier,
SourceLocation IdentifierLoc,
SourceLocation CCLoc,
QualType ObjectType,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr);
/// \brief The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param Identifier The identifier preceding the '::'.
///
/// \param IdentifierLoc The location of the identifier.
///
/// \param CCLoc The location of the '::'.
///
/// \param ObjectType The type of the object, if we're parsing
/// nested-name-specifier in a member access expression.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery. In this case do not emit error message.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed by this parameter is set to 'true'
/// if the identifier is treated as if it was followed by ':', not '::'.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
IdentifierInfo &Identifier,
SourceLocation IdentifierLoc,
SourceLocation CCLoc,
ParsedType ObjectType,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr);
/// Handle the operand of a decltype-specifier.
ExprResult ActOnDecltypeExpression(Expr *E);
/// The parser has parsed a nested-name-specifier 'decltype(expr)::'.
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
/// Determine whether 'Identifier::' is only valid as a nested-name-specifier
/// (used during disambiguation/error recovery).
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo &Identifier,
SourceLocation IdentifierLoc,
SourceLocation ColonLoc,
ParsedType ObjectType,
bool EnteringContext);
/// \brief The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// \brief Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// \brief Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
/// Determine whether the declarator scope denoted by \p SS should be
/// entered before parsing the declarator.
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// \brief Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// \brief Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params);
/// \brief Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// \brief Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
QualType performLambdaInitCaptureInitialization(SourceLocation Loc,
bool ByRef, IdentifierInfo *Id, Expr *&Init);
/// \brief Create a dummy variable within the declcontext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType, IdentifierInfo *Id, Expr *Init);
/// \brief Build the implicit field for an init-capture.
FieldDecl *buildInitCaptureField(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// \brief Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// \brief Introduce the lambda parameters into scope.
void addLambdaParameters(CXXMethodDecl *CallOperator, Scope *CurScope);
/// \brief Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// \brief Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// \brief Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// \brief Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
/// Build the block object used when converting a lambda to an
/// Objective-C block via the conversion function \p Conv.
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
Expr **Strings,
unsigned NumStrings);
/// Build an Objective-C string literal from an already-parsed StringLiteral.
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
/// Handle an Objective-C boolean literal ('@YES' / '@NO') with the given
/// value.
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
/// Build an Objective-C array literal '@[...]' from its elements.
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
/// Build an Objective-C subscript expression using the given getter and
/// setter methods.
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
/// Build an Objective-C dictionary literal '@{...}' from its elements.
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
ObjCDictionaryElement *Elements,
unsigned NumElements);
/// Build an '@encode(type)' expression from an analyzed type.
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
/// Build a call to the C++ conversion member function \p Method on \p Exp.
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
/// ParseObjCEncodeExpression - Parse an '@encode(type)' expression.
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
/// Handle the start of a linkage specification, e.g. 'extern "C" {'.
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
/// Complete the linkage specification started by
/// ActOnStartLinkageSpecification.
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
/// Determine whether \p II names the class currently being defined.
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
/// Determine whether \p II is a plausible typo for the current class name,
/// correcting it in place if so.
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
/// Handle an access specifier ('public:', 'private:', 'protected:').
bool ActOnAccessSpecifier(AccessSpecifier Access,
SourceLocation ASLoc,
SourceLocation ColonLoc,
AttributeList *Attrs = nullptr);
/// Handle a C++ class member declarator (field, method, bitfield, ...).
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
/// Begin parsing an in-class (NSDMI) member initializer.
void ActOnStartCXXInClassMemberInitializer();
/// Finish an in-class member initializer, attaching \p Init to \p VarDecl.
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
/// Handle a constructor mem-initializer with a parenthesized argument list.
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
/// Handle a constructor mem-initializer with a braced-init-list initializer.
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
/// Common worker for the ActOnMemInitializer overloads.
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
/// Build a mem-initializer that initializes a non-static data member.
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
/// Build a mem-initializer that initializes a base class subobject.
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
/// Build a delegating constructor mem-initializer.
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
/// Attach a delegating initializer to \p Constructor.
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
/// Set (and check) the full list of ctor-initializers for \p Constructor.
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
/// Build initializers for the instance variables of an Objective-C
/// implementation.
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// \brief A use of a class's vtable: the class and the source location at
/// which the first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// \brief The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// \brief The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// \brief Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// \brief Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// \brief Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc,
const CXXRecordDecl *RD);
/// \brief Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
/// Add implicitly-declared special members (constructors, destructor, ...)
/// to \p ClassDecl.
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
/// Handle the full list of mem-initializers for a constructor definition.
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
/// Check/propagate class-level DLL import/export attributes.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
/// Propagate a class's DLL attribute to a base class template
/// specialization.
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
/// Perform checks that apply once a class definition is complete.
void CheckCompletedCXXClass(CXXRecordDecl *Record);
/// Handle the end of a C++ member-specification (the class body).
void ActOnFinishCXXMemberSpecification(Scope* S, SourceLocation RLoc,
Decl *TagDecl,
SourceLocation LBrac,
SourceLocation RBrac,
AttributeList *AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXMemberDefaultArgs(Decl *D);
// Delayed parsing of member function bodies / default args: re-enter and
// exit the relevant scopes.
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
/// Record that \p FD is a late-parsed template whose body tokens are
/// \p Toks.
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
/// Handle a parsed static_assert declaration.
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
/// Build a static_assert declaration from analyzed components.
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
/// Check a friend type declaration ('friend class X;').
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
// Declarator checks for the C++ special member functions.
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
/// Check an explicitly-defaulted special member function ('= default').
void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD);
void CheckExplicitlyDefaultedMemberExceptionSpec(CXXMethodDecl *MD,
const FunctionProtoType *T);
void CheckDelayedMemberExceptionSpecs();
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// Check a single base specifier for validity, returning the base
/// specifier on success.
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
/// ActOnBaseSpecifier - Parsed a base specifier.
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
/// Attach the checked base specifiers to \p Class.
bool AttachBaseSpecifiers(CXXRecordDecl *Class, CXXBaseSpecifier **Bases,
unsigned NumBases);
/// Handle the full list of base specifiers for a class.
void ActOnBaseSpecifiers(Decl *ClassDecl, CXXBaseSpecifier **Bases,
unsigned NumBases);
bool IsDerivedFrom(QualType Derived, QualType Base);
// Overload that also records the inheritance paths found.
bool IsDerivedFrom(QualType Derived, QualType Base, CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
/// Check the validity (accessibility, unambiguity) of a derived-to-base
/// conversion.
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
// Overload taking explicit diagnostic IDs for the inaccessible and
// ambiguous cases.
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbigiousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath);
/// Produce a human-readable description of the ambiguous paths in \p Paths.
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
/// Check attribute compatibility between an overriding and overridden
/// method.
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// Check the '= 0' pure-specifier on \p Method.
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D);
/// CheckForFunctionMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
//===--------------------------------------------------------------------===//
// C++ Access Control
//
/// \brief The outcome of a C++ access check.
enum AccessResult {
  AR_accessible,   ///< Access is permitted.
  AR_inaccessible, ///< Access is not permitted.
  AR_dependent,    ///< The check could not be resolved yet (dependent
                   ///< context).
  AR_delayed       ///< The check has been deferred (see HandleDelayedAccessCheck).
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
                              NamedDecl *PrevMemberDecl,
                              AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
                                         DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
                                         DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
                                   SourceRange PlacementRange,
                                   CXXRecordDecl *NamingClass,
                                   DeclAccessPair FoundDecl,
                                   bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
                                    CXXConstructorDecl *D,
                                    const InitializedEntity &Entity,
                                    AccessSpecifier Access,
                                    bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
                                    CXXConstructorDecl *D,
                                    const InitializedEntity &Entity,
                                    AccessSpecifier Access,
                                    const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
                                   CXXDestructorDecl *Dtor,
                                   const PartialDiagnostic &PDiag,
                                   QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
                               CXXRecordDecl *NamingClass,
                               DeclAccessPair Found);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
                                       Expr *ObjectExpr,
                                       Expr *ArgExpr,
                                       DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
                                        DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
                                  QualType Base, QualType Derived,
                                  const CXXBasePath &Path,
                                  unsigned DiagID,
                                  bool ForceCheck = false,
                                  bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *decl, DeclContext *Ctx);
bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl,
                                          AccessSpecifier access,
                                          QualType objectType);
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
                                const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
                                 const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// \brief When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
/// \brief Selects which kind of entity has been given an abstract class
/// type, for use in abstract-type diagnostics.
enum AbstractDiagSelID {
  AbstractNone = -1, ///< No specific entity kind.
  AbstractReturnType,
  AbstractParamType,
  AbstractVariableType,
  AbstractFieldType,
  AbstractIvarType,
  AbstractSynthesizedIvarType,
  AbstractArrayType
};
/// Require that \p T not be an abstract class type, reporting through
/// \p Diagnoser on violation; returns true on error.
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
                            TypeDiagnoser &Diagnoser);
/// \brief Convenience overload: binds \p DiagID and its arguments into a
/// BoundTypeDiagnoser and forwards to the TypeDiagnoser-based overload.
template <typename... DiagArgTys>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
                            const DiagArgTys &...DiagArgs) {
  BoundTypeDiagnoser<DiagArgTys...> BoundDiagnoser(DiagID, DiagArgs...);
  return RequireNonAbstractType(Loc, T, BoundDiagnoser);
}
/// Emit notes explaining why \p RD is abstract (see definition for the
/// exact notes produced).
void DiagnoseAbstractType(const CXXRecordDecl *RD);
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
                            AbstractDiagSelID SelID = AbstractNone);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
                                   bool AllowFunctionTemplates = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
                                   bool AllowFunctionTemplates = true);
void LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS,
                        QualType ObjectType, bool EnteringContext,
                        bool &MemberOfUnknownSpecialization);
TemplateNameKind isTemplateName(Scope *S,
                                CXXScopeSpec &SS,
                                bool hasTemplateKeyword,
                                UnqualifiedId &Name,
                                ParsedType ObjectType,
                                bool EnteringContext,
                                TemplateTy &Template,
                                bool &MemberOfUnknownSpecialization);
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
                                 SourceLocation IILoc,
                                 Scope *S,
                                 const CXXScopeSpec *SS,
                                 TemplateTy &SuggestedTemplate,
                                 TemplateNameKind &SuggestedKind);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
// Parser callbacks for the three kinds of template parameters
// (type, non-type, template template); Depth/Position locate the
// parameter within nested template parameter lists.
Decl *ActOnTypeParameter(Scope *S, bool Typename,
                         SourceLocation EllipsisLoc,
                         SourceLocation KeyLoc,
                         IdentifierInfo *ParamName,
                         SourceLocation ParamNameLoc,
                         unsigned Depth, unsigned Position,
                         SourceLocation EqualLoc,
                         ParsedType DefaultArg);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
Decl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
                                    unsigned Depth,
                                    unsigned Position,
                                    SourceLocation EqualLoc,
                                    Expr *DefaultArg);
Decl *ActOnTemplateTemplateParameter(Scope *S,
                                     SourceLocation TmpLoc,
                                     TemplateParameterList *Params,
                                     SourceLocation EllipsisLoc,
                                     IdentifierInfo *ParamName,
                                     SourceLocation ParamNameLoc,
                                     unsigned Depth,
                                     unsigned Position,
                                     SourceLocation EqualLoc,
                                     ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
                           SourceLocation ExportLoc,
                           SourceLocation TemplateLoc,
                           SourceLocation LAngleLoc,
                           Decl **Params, unsigned NumParams,
                           SourceLocation RAngleLoc);
/// \brief The context in which we are checking a template parameter list.
enum TemplateParamListContext {
  TPC_ClassTemplate,
  TPC_VarTemplate,
  TPC_FunctionTemplate,
  TPC_ClassTemplateMember,
  TPC_FriendClassTemplate,
  TPC_FriendFunctionTemplate,
  TPC_FriendFunctionTemplateDefinition,
  TPC_TypeAliasTemplate
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
                                TemplateParameterList *OldParams,
                                TemplateParamListContext TPC);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
    SourceLocation DeclStartLoc, SourceLocation DeclLoc,
    const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
    ArrayRef<TemplateParameterList *> ParamLists,
    bool IsFriend, bool &IsExplicitSpecialization, bool &Invalid);
DeclResult CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK,
                              SourceLocation KWLoc, CXXScopeSpec &SS,
                              IdentifierInfo *Name, SourceLocation NameLoc,
                              AttributeList *Attr,
                              TemplateParameterList *TemplateParams,
                              AccessSpecifier AS,
                              SourceLocation ModulePrivateLoc,
                              SourceLocation FriendLoc,
                              unsigned NumOuterTemplateParamLists,
                              TemplateParameterList **OuterTemplateParamLists,
                              SkipBodyInfo *SkipBody = nullptr);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
                                TemplateArgumentListInfo &Out);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
                             SourceLocation TemplateLoc,
                             TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
                    TemplateTy Template, SourceLocation TemplateLoc,
                    SourceLocation LAngleLoc,
                    ASTTemplateArgsPtr TemplateArgs,
                    SourceLocation RAngleLoc,
                    bool IsCtorOrDtorName = false);
/// \brief Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
                                  TypeSpecifierType TagSpec,
                                  SourceLocation TagLoc,
                                  CXXScopeSpec &SS,
                                  SourceLocation TemplateKWLoc,
                                  TemplateTy TemplateD,
                                  SourceLocation TemplateLoc,
                                  SourceLocation LAngleLoc,
                                  ASTTemplateArgsPtr TemplateArgsIn,
                                  SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
    Scope *S, Declarator &D, TypeSourceInfo *DI,
    SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
    StorageClass SC, bool IsPartialSpecialization);
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
                              SourceLocation TemplateLoc,
                              SourceLocation TemplateNameLoc,
                              const TemplateArgumentListInfo &TemplateArgs);
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
                              const DeclarationNameInfo &NameInfo,
                              VarTemplateDecl *Template,
                              SourceLocation TemplateLoc,
                              const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
                               SourceLocation TemplateKWLoc,
                               LookupResult &R,
                               bool RequiresADL,
                               const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
                                        SourceLocation TemplateKWLoc,
                                        const DeclarationNameInfo &NameInfo,
                                        const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnDependentTemplateName(Scope *S,
                                            CXXScopeSpec &SS,
                                            SourceLocation TemplateKWLoc,
                                            UnqualifiedId &Name,
                                            ParsedType ObjectType,
                                            bool EnteringContext,
                                            TemplateTy &Template);
DeclResult
ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, TagUseKind TUK,
                                 SourceLocation KWLoc,
                                 SourceLocation ModulePrivateLoc,
                                 TemplateIdAnnotation &TemplateId,
                                 AttributeList *Attr,
                                 MultiTemplateParamsArg TemplateParameterLists,
                                 SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplateDeclarator(Scope *S,
                              MultiTemplateParamsArg TemplateParameterLists,
                              Declarator &D);
Decl *ActOnStartOfFunctionTemplateDef(Scope *FnBodyScope,
                                      MultiTemplateParamsArg TemplateParameterLists,
                                      Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
                                       TemplateSpecializationKind NewTSK,
                                       NamedDecl *PrevDecl,
                                       TemplateSpecializationKind PrevTSK,
                                       SourceLocation PrevPtOfInstantiation,
                                       bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
    const TemplateArgumentListInfo &ExplicitTemplateArgs,
    LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(FunctionDecl *FD,
    TemplateArgumentListInfo *ExplicitTemplateArgs,
    LookupResult &Previous);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
// Three parser entry points for explicit instantiation: of a template-id,
// of a plain class name, and of a declarator (function/variable).
DeclResult
ActOnExplicitInstantiation(Scope *S,
                           SourceLocation ExternLoc,
                           SourceLocation TemplateLoc,
                           unsigned TagSpec,
                           SourceLocation KWLoc,
                           const CXXScopeSpec &SS,
                           TemplateTy Template,
                           SourceLocation TemplateNameLoc,
                           SourceLocation LAngleLoc,
                           ASTTemplateArgsPtr TemplateArgs,
                           SourceLocation RAngleLoc,
                           AttributeList *Attr);
DeclResult
ActOnExplicitInstantiation(Scope *S,
                           SourceLocation ExternLoc,
                           SourceLocation TemplateLoc,
                           unsigned TagSpec,
                           SourceLocation KWLoc,
                           CXXScopeSpec &SS,
                           IdentifierInfo *Name,
                           SourceLocation NameLoc,
                           AttributeList *Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
                                      SourceLocation ExternLoc,
                                      SourceLocation TemplateLoc,
                                      Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
                                        SourceLocation TemplateLoc,
                                        SourceLocation RAngleLoc,
                                        Decl *Param,
                                        SmallVectorImpl<TemplateArgument> &Converted,
                                        bool &HasDefaultArg);
/// \brief Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
  /// \brief The template argument was specified in the code or was
  /// instantiated with some deduced template arguments.
  CTAK_Specified,
  /// \brief The template argument was deduced via template argument
  /// deduction.
  CTAK_Deduced,
  /// \brief The template argument was deduced from an array bound
  /// via template argument deduction.
  CTAK_DeducedFromArrayBound
};
bool CheckTemplateArgument(NamedDecl *Param,
                           TemplateArgumentLoc &Arg,
                           NamedDecl *Template,
                           SourceLocation TemplateLoc,
                           SourceLocation RAngleLoc,
                           unsigned ArgumentPackIndex,
                           SmallVectorImpl<TemplateArgument> &Converted,
                           CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// \brief Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
                               SourceLocation TemplateLoc,
                               TemplateArgumentListInfo &TemplateArgs,
                               bool PartialTemplateArgs,
                               SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
                               TemplateArgumentLoc &Arg,
                               SmallVectorImpl<TemplateArgument> &Converted);
// Overloads of CheckTemplateArgument for each parameter kind:
// type, non-type (expression), and template template.
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
                           TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
                                 QualType InstantiatedParamType, Expr *Arg,
                                 TemplateArgument &Converted,
                                 CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateArgument(TemplateTemplateParmDecl *Param,
                           TemplateArgumentLoc &Arg,
                           unsigned ArgumentPackIndex);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
                                        QualType ParamType,
                                        SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
                                            SourceLocation Loc);
/// \brief Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
  /// \brief We are matching the template parameter lists of two templates
  /// that might be redeclarations.
  ///
  /// \code
  /// template<typename T> struct X;
  /// template<typename T> struct X;
  /// \endcode
  TPL_TemplateMatch,
  /// \brief We are matching the template parameter lists of two template
  /// template parameters as part of matching the template parameter lists
  /// of two templates that might be redeclarations.
  ///
  /// \code
  /// template<template<int I> class TT> struct X;
  /// template<template<int Value> class Other> struct X;
  /// \endcode
  TPL_TemplateTemplateParmMatch,
  /// \brief We are matching the template parameter lists of a template
  /// template argument against the template parameter lists of a template
  /// template parameter.
  ///
  /// \code
  /// template<template<int Value> class Metafun> struct X;
  /// template<int Value> struct integer_c;
  /// X<integer_c> xic;
  /// \endcode
  TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
                                    TemplateParameterList *Old,
                                    bool Complain,
                                    TemplateParameterListEqualKind Kind,
                                    SourceLocation TemplateArgLoc
                                      = SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// \brief Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
                  const CXXScopeSpec &SS, const IdentifierInfo &II,
                  SourceLocation IdLoc);
/// \brief Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
                  const CXXScopeSpec &SS,
                  SourceLocation TemplateLoc,
                  TemplateTy TemplateName,
                  SourceLocation TemplateNameLoc,
                  SourceLocation LAngleLoc,
                  ASTTemplateArgsPtr TemplateArgs,
                  SourceLocation RAngleLoc);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
                           SourceLocation KeywordLoc,
                           NestedNameSpecifierLoc QualifierLoc,
                           const IdentifierInfo &II,
                           SourceLocation IILoc);
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
                                                  SourceLocation Loc,
                                                  DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
    TemplateParameterList *Params);
// Produce a human-readable rendering of template parameter bindings,
// e.g. for diagnostics (see definitions for exact format).
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
                                const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
                                const TemplateArgument *Args,
                                unsigned NumArgs);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// \brief The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
  /// \brief An arbitrary expression.
  UPPC_Expression = 0,
  /// \brief The base type of a class type.
  UPPC_BaseType,
  /// \brief The type of an arbitrary declaration.
  UPPC_DeclarationType,
  /// \brief The type of a data member.
  UPPC_DataMemberType,
  /// \brief The size of a bit-field.
  UPPC_BitFieldWidth,
  /// \brief The expression in a static assertion.
  UPPC_StaticAssertExpression,
  /// \brief The fixed underlying type of an enumeration.
  UPPC_FixedUnderlyingType,
  /// \brief The enumerator value.
  UPPC_EnumeratorValue,
  /// \brief A using declaration.
  UPPC_UsingDeclaration,
  /// \brief A friend declaration.
  UPPC_FriendDeclaration,
  /// \brief A declaration qualifier.
  UPPC_DeclarationQualifier,
  /// \brief An initializer.
  UPPC_Initializer,
  /// \brief A default argument.
  UPPC_DefaultArgument,
  /// \brief The type of a non-type template parameter.
  UPPC_NonTypeTemplateParameterType,
  /// \brief The type of an exception.
  UPPC_ExceptionType,
  /// \brief Partial specialization.
  UPPC_PartialSpecialization,
  /// \brief Microsoft __if_exists.
  UPPC_IfExists,
  /// \brief Microsoft __if_not_exists.
  UPPC_IfNotExists,
  /// \brief Lambda expression.
  UPPC_Lambda,
  /// \brief Block expression.
  UPPC_Block
};
/// \brief Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
                                      UnexpandedParameterPackContext UPPC,
                                      ArrayRef<UnexpandedParameterPack> Unexpanded);
/// \brief If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
                                     UnexpandedParameterPackContext UPPC);
/// \brief If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
                                     UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// \brief If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
                                     UnexpandedParameterPackContext UPPC);
/// \brief If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
                                     UnexpandedParameterPackContext UPPC);
/// \brief If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
                                     TemplateName Template,
                                     UnexpandedParameterPackContext UPPC);
/// \brief If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
                                     UnexpandedParameterPackContext UPPC);
/// \brief Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
                                     SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
                                     SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
                                     SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
                                     SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param SS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(CXXScopeSpec &SS,
                                     SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
                                     SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
                                          SourceLocation EllipsisLoc);
/// \brief Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// \brief Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
                                   SourceLocation EllipsisLoc,
                                   Optional<unsigned> NumExpansions);
/// \brief Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern,
                            SourceRange PatternRange,
                            SourceLocation EllipsisLoc,
                            Optional<unsigned> NumExpansions);
/// \brief Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// \brief Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
                              Optional<unsigned> NumExpansions);
/// \brief Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
                                     SourceRange PatternRange,
                                     ArrayRef<UnexpandedParameterPack> Unexpanded,
                                     const MultiLevelTemplateArgumentList &TemplateArgs,
                                     bool &ShouldExpand,
                                     bool &RetainExpansion,
                                     Optional<unsigned> &NumExpansions);
/// \brief Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
                                              const MultiLevelTemplateArgumentList &TemplateArgs);
/// \brief Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// \brief Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
    TemplateArgumentLoc OrigLoc,
    SourceLocation &Ellipsis,
    Optional<unsigned> &NumExpansions) const;
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType);
/// \brief Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
  /// \brief Template argument deduction was successful.
  TDK_Success = 0,
  /// \brief The declaration was invalid; do nothing.
  TDK_Invalid,
  /// \brief Template argument deduction exceeded the maximum template
  /// instantiation depth (which has already been diagnosed).
  TDK_InstantiationDepth,
  /// \brief Template argument deduction did not deduce a value
  /// for every template parameter.
  TDK_Incomplete,
  /// \brief Template argument deduction produced inconsistent
  /// deduced values for the given template parameter.
  TDK_Inconsistent,
  /// \brief Template argument deduction failed due to inconsistent
  /// cv-qualifiers on a template parameter type that would
  /// otherwise be deduced, e.g., we tried to deduce T in "const T"
  /// but were given a non-const "X".
  TDK_Underqualified,
  /// \brief Substitution of the deduced template argument values
  /// resulted in an error.
  TDK_SubstitutionFailure,
  /// \brief A non-dependent component of the parameter did not match the
  /// corresponding component of the argument.
  TDK_NonDeducedMismatch,
  /// \brief When performing template argument deduction for a function
  /// template, there were too many call arguments.
  TDK_TooManyArguments,
  /// \brief When performing template argument deduction for a function
  /// template, there were too few call arguments.
  TDK_TooFewArguments,
  /// \brief The explicitly-specified template arguments were not valid
  /// template arguments for the given template.
  TDK_InvalidExplicitArguments,
  /// \brief The arguments included an overloaded function name that could
  /// not be resolved to a suitable function.
  TDK_FailedOverloadResolution,
  /// \brief Deduction failed; that's all we know.
  TDK_MiscellaneousDeductionFailure
};
// Deduce template arguments for a class/variable template partial
// specialization against a concrete argument list.
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
                        const TemplateArgumentList &TemplateArgs,
                        sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
                        const TemplateArgumentList &TemplateArgs,
                        sema::TemplateDeductionInfo &Info);
TemplateDeductionResult SubstituteExplicitTemplateArguments(
    FunctionTemplateDecl *FunctionTemplate,
    TemplateArgumentListInfo &ExplicitTemplateArgs,
    SmallVectorImpl<DeducedTemplateArgument> &Deduced,
    SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
    sema::TemplateDeductionInfo &Info);
/// brief A function argument from which we performed template argument
// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType,
unsigned ArgIdx,
QualType OriginalArgType)
: OriginalParamType(OriginalParamType), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) { }
QualType OriginalParamType;
unsigned ArgIdx;
QualType OriginalArgType;
};
TemplateDeductionResult
FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
ArrayRef<Expr *> Args,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool PartialOverloading = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool InOverloadResolution = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool InOverloadResolution = false);
/// \brief Substitute Replacement for \p auto in \p TypeWithAuto
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// \brief Substitute \p Replacement for \c auto in \p TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// \brief Result type of DeduceAutoType.
enum DeduceAutoResult {
/// Deduction succeeded.
DAR_Succeeded,
/// Deduction failed; no diagnostic has been emitted yet.
DAR_Failed,
/// Deduction failed and a diagnostic was already emitted.
DAR_FailedAlreadyDiagnosed
};
DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer,
QualType &Result);
DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer,
QualType &Result);
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1,
FunctionTemplateDecl *FT2,
SourceLocation Loc,
TemplatePartialOrderingContext TPOC,
unsigned NumCallArguments1,
unsigned NumCallArguments2);
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
/// \brief Mark the template parameters deduced by \p FunctionTemplate,
/// forwarding to the static overload using this semantic analysis
/// object's ASTContext.
void MarkDeducedTemplateParameters(
    const FunctionTemplateDecl *FunctionTemplate,
    llvm::SmallBitVector &Deduced) {
  MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// \brief A template instantiation that is currently in progress.
struct ActiveTemplateInstantiation {
/// \brief The kind of template instantiation we are performing
enum InstantiationKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template, and
/// TemplateArgs/NumTemplateArguments provides the template
/// arguments as specified.
/// FIXME: Use a TemplateArgumentList
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
/// We are substituting template arguments determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a ClassTemplatePartialSpecializationDecl or
/// a FunctionTemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation
} Kind;
/// \brief The point of instantiation within the source code.
SourceLocation PointOfInstantiation;
/// \brief The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// \brief The entity that is being instantiated.
Decl *Entity;
/// \brief The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
/// \brief The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// \brief The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
/// \brief The source range that covers the construct that causes
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
// Kind defaults to TemplateInstantiation; PointOfInstantiation and
// InstantiationRange are default-constructed.
ActiveTemplateInstantiation()
: Kind(TemplateInstantiation), Template(nullptr), Entity(nullptr),
TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {}
/// \brief Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
friend bool operator==(const ActiveTemplateInstantiation &X,
const ActiveTemplateInstantiation &Y) {
if (X.Kind != Y.Kind)
return false;
if (X.Entity != Y.Entity)
return false;
// Note: PointOfInstantiation, DeductionInfo, and InstantiationRange do
// not participate in the comparison; which of the remaining fields
// matter depends on the instantiation kind.
switch (X.Kind) {
case TemplateInstantiation:
case ExceptionSpecInstantiation:
return true;
case PriorTemplateArgumentSubstitution:
case DefaultTemplateArgumentChecking:
return X.Template == Y.Template && X.TemplateArgs == Y.TemplateArgs;
case DefaultTemplateArgumentInstantiation:
case ExplicitTemplateArgumentSubstitution:
case DeducedTemplateArgumentSubstitution:
case DefaultFunctionArgumentInstantiation:
return X.TemplateArgs == Y.TemplateArgs;
}
llvm_unreachable("Invalid InstantiationKind!");
}
friend bool operator!=(const ActiveTemplateInstantiation &X,
const ActiveTemplateInstantiation &Y) {
return !(X == Y);
}
};
/// \brief List of active template instantiations.
///
/// This vector is treated as a stack. As one template instantiation
/// requires another template instantiation, additional
/// instantiations are pushed onto the stack up to a
/// user-configurable limit LangOptions::InstantiationDepth.
SmallVector<ActiveTemplateInstantiation, 16>
ActiveTemplateInstantiations;
/// \brief Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> ActiveTemplateInstantiationLookupModules;
/// \brief Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// \brief Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// \brief Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (\c see SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// \brief The number of ActiveTemplateInstantiation entries in
/// \c ActiveTemplateInstantiations that are not actual instantiations and,
/// therefore, should not be counted as part of the instantiation depth.
unsigned NonInstantiationEntries;
/// \brief The last template from which a template instantiation
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant template
/// instantiation backtraces when there are multiple errors in the
/// same instantiation. FIXME: Does this belong in Sema? It's tough
/// to implement it anywhere else.
ActiveTemplateInstantiation LastTemplateInstantiationErrorContext;
/// \brief The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// \brief RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII {
Sema &Self;
int OldSubstitutionIndex;
public:
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}
~ArgumentPackSubstitutionIndexRAII() {
Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
}
};
friend class ArgumentPackSubstitutionRAII;
/// \brief The stack of calls expression undergoing template instantiation.
///
/// The top of this stack is used by a fixit instantiating unresolved
/// function calls to fix the AST to match the textual change it prints.
SmallVector<CallExpr *, 8> CallsUndergoingInstantiation;
/// \brief For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// \brief A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and evaluates true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
/// \brief Note that we are instantiating a class template,
/// function template, or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Decl *Entity,
SourceRange InstantiationRange = SourceRange());
/// \brief Tag type used to select the exception-specification
/// constructor overload below.
struct ExceptionSpecification {};
/// \brief Note that we are instantiating an exception specification
/// of a function template.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionDecl *Entity, ExceptionSpecification,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are substituting explicit or deduced template
/// arguments for a function template; the exact phase is given by
/// \p Kind (cf. ActiveTemplateInstantiation::InstantiationKind).
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionTemplateDecl *FunctionTemplate,
ArrayRef<TemplateArgument> TemplateArgs,
ActiveTemplateInstantiation::InstantiationKind Kind,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating as part of template
/// argument deduction for a class template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ClassTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating as part of template
/// argument deduction for a variable template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
VarTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating a default argument for a
/// function parameter (cf. DefaultFunctionArgumentInstantiation).
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParmVarDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are substituting prior template arguments into a
/// non-type parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// \brief Note that we are substituting prior template arguments into a
/// template template parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
TemplateTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// \brief Note that we are checking the default template argument
/// against the template parameter for a given template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
NamedDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// \brief Note that we have finished instantiating this template.
void Clear();
~InstantiatingTemplate() { Clear(); }
/// \brief Determines whether we have exceeded the maximum
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }
private:
Sema &SemaRef;
// Whether construction failed (e.g., the instantiation depth check
// did not pass); queried via isInvalid().
bool Invalid;
// Saved copy of Sema::InNonInstantiationSFINAEContext.
bool SavedInNonInstantiationSFINAEContext;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
// General (private) form used to record an instantiation of any kind.
InstantiatingTemplate(
Sema &SemaRef, ActiveTemplateInstantiation::InstantiationKind Kind,
SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
Decl *Entity, NamedDecl *Template = nullptr,
ArrayRef<TemplateArgument> TemplateArgs = ArrayRef<TemplateArgument>(),
sema::TemplateDeductionInfo *DeductionInfo = nullptr);
// Non-copyable: each object corresponds to one stack entry.
InstantiatingTemplate(const InstantiatingTemplate&) = delete;
InstantiatingTemplate&
operator=(const InstantiatingTemplate&) = delete;
};
void PrintInstantiationStack();
/// \brief Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// \brief Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
  assert(!ExprEvalContexts.empty() &&
         "Must be in an expression evaluation context");
  // Query the innermost (current) expression evaluation context.
  const auto &Innermost = ExprEvalContexts.back();
  return Innermost.isUnevaluated();
}
/// \brief RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap {
  Sema &SemaRef;
  // State captured at construction, restored by the destructor.
  unsigned SavedSFINAEErrors;
  bool SavedInNonInstantiationSFINAEContext;
  bool SavedAccessCheckingSFINAE;

public:
  explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
      : SemaRef(SemaRef), SavedSFINAEErrors(SemaRef.NumSFINAEErrors),
        SavedInNonInstantiationSFINAEContext(
            SemaRef.InNonInstantiationSFINAEContext),
        SavedAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE) {
    // Entering the trap outside of any SFINAE context marks this as a
    // non-instantiation SFINAE context.
    if (!SemaRef.isSFINAEContext())
      SemaRef.InNonInstantiationSFINAEContext = true;
    SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
  }

  ~SFINAETrap() {
    // Restore every piece of SFINAE state saved at construction.
    SemaRef.NumSFINAEErrors = SavedSFINAEErrors;
    SemaRef.InNonInstantiationSFINAEContext =
        SavedInNonInstantiationSFINAEContext;
    SemaRef.AccessCheckingSFINAE = SavedAccessCheckingSFINAE;
  }

  /// \brief Determine whether any SFINAE errors have been trapped.
  bool hasErrorOccurred() const {
    return SemaRef.NumSFINAEErrors > SavedSFINAEErrors;
  }
};
/// \brief RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
  Sema &SemaRef;
  // FIXME: Using a SFINAETrap for this is a hack.
  SFINAETrap Trap;
  // DisableTypoCorrection value in effect before this scope was entered.
  bool SavedDisableTypoCorrection;

public:
  explicit TentativeAnalysisScope(Sema &SemaRef)
      : SemaRef(SemaRef), Trap(SemaRef, true),
        SavedDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
    // Suppress typo correction for the duration of this scope.
    SemaRef.DisableTypoCorrection = true;
  }

  ~TentativeAnalysisScope() {
    SemaRef.DisableTypoCorrection = SavedDisableTypoCorrection;
  }
};
/// \brief The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// \brief Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// \brief The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// \brief A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// \brief Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// \brief An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation),
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// \brief The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
class SavePendingInstantiationsAndVTableUsesRAII {
public:
  // When Enabled, stash the current pending-instantiation queue and the
  // pending vtable uses; both are reinstated by the destructor.
  SavePendingInstantiationsAndVTableUsesRAII(Sema &S, bool Enabled)
      : S(S), Enabled(Enabled) {
    if (Enabled) {
      SavedPendingInstantiations.swap(S.PendingInstantiations);
      SavedVTableUses.swap(S.VTableUses);
    }
  }

  ~SavePendingInstantiationsAndVTableUsesRAII() {
    if (Enabled) {
      // Restore the set of pending vtables.
      assert(S.VTableUses.empty() &&
             "VTableUses should be empty before it is discarded.");
      S.VTableUses.swap(SavedVTableUses);

      // Restore the set of pending implicit instantiations.
      assert(S.PendingInstantiations.empty() &&
             "PendingInstantiations should be empty before it is discarded.");
      S.PendingInstantiations.swap(SavedPendingInstantiations);
    }
  }

private:
  Sema &S;
  SmallVector<VTableUse, 16> SavedVTableUses;
  std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
  bool Enabled; // No-op save/restore when false.
};
/// \brief The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
class SavePendingLocalImplicitInstantiationsRAII {
public:
SavePendingLocalImplicitInstantiationsRAII(Sema &S): S(S) {
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
~SavePendingLocalImplicitInstantiationsRAII() {
assert(S.PendingLocalImplicitInstantiations.empty() &&
"there shouldn't be any pending local implicit instantiations");
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
private:
Sema &S;
std::deque<PendingImplicitInstantiation>
SavedPendingLocalImplicitInstantiations;
};
void PerformPendingInstantiations(bool LocalOnly = false);
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
unsigned ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc,
ParmVarDecl **Params, unsigned NumParams,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams = nullptr);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// \brief Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param NumExprs The number of expressions in \p Exprs.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(Expr **Exprs, unsigned NumExprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
struct LateInstantiatedAttribute {
  // Records an attribute whose instantiation was deferred; see the
  // LateAttrs parameter of InstantiateAttrs.
  const Attr *TmplAttr;
  LocalInstantiationScope *Scope;
  Decl *NewDecl;

  LateInstantiatedAttribute(const Attr *Attribute,
                            LocalInstantiationScope *InstScope, Decl *D)
      : TmplAttr(Attribute), Scope(InstScope), NewDecl(D) {}
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation, void *InsertPos,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false);
void InstantiateStaticDataMemberDefinition(
SourceLocation PointOfInstantiation,
VarDecl *Var,
bool Recursive = false,
bool DefinitionRequired = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
/// The kind of Objective-C container context we are currently in;
/// see getObjCContainerKind().
enum ObjCContainerKind {
OCK_None = -1, // Sentinel: not inside any Objective-C container.
OCK_Interface = 0,
OCK_Protocol,
OCK_Category,
OCK_ClassExtension,
OCK_Implementation,
OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange,
Decl * const *ProtoRefs,
unsigned NumProtoRefs,
const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc,
AttributeList *AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc,
IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc,
Decl * const *ProtoRefNames, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc,
AttributeList *AttrList);
Decl *ActOnStartCategoryInterface(SourceLocation AtInterfaceLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName,
SourceLocation CategoryLoc,
Decl * const *ProtoRefs,
unsigned NumProtoRefs,
const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc);
Decl *ActOnStartClassImplementation(
SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName, SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc,
const IdentifierLocPair *IdentList,
unsigned NumElts,
AttributeList *attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
const IdentifierLocPair *ProtocolId,
unsigned NumProtocols,
SmallVectorImpl<Decl *> &Protocols);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Check the application of the Objective-C '__kindof' qualifier to
/// the given type.
bool checkObjCKindOfType(QualType &type, SourceLocation loc);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
/// \param CD The semantic container for the property
/// \param redeclaredProperty Declaration for property if redeclared
/// in class extension.
/// \param lexicalDC Container for redeclaredProperty.
void ProcessPropertyDecl(ObjCPropertyDecl *property,
ObjCContainerDecl *CD,
ObjCPropertyDecl *redeclaredProperty = nullptr,
ObjCContainerDecl *lexicalDC = nullptr);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
bool *OverridingProperty,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc);
/// Kinds of Objective-C methods that carry special semantics for the
/// compiler (allocation, creation, copying, initialization).
/// NOTE(review): enumerator meanings below are inferred from the names —
/// confirm against the uses before relying on them.
enum ObjCSpecialMethodKind {
  OSMK_None,             // Not a special method.
  OSMK_Alloc,            // +alloc family.
  OSMK_New,              // +new family.
  OSMK_Copy,             // -copy family.
  OSMK_RetainingInit,    // An -init that retains.
  OSMK_NonRetainingInit  // An -init that does not retain.
};
/// Parser-level information about one argument of an Objective-C method
/// declaration; an array of these is consumed by ActOnMethodDeclaration.
struct ObjCArgInfo {
  IdentifierInfo *Name;    ///< The argument's name.
  SourceLocation NameLoc;  ///< Location of the argument's name.
  // The Type is null if no type was specified, and the DeclSpec is invalid
  // in this case.
  ParsedType Type;
  ObjCDeclSpec DeclSpec;
  /// ArgAttrs - Attribute list for this argument.
  AttributeList *ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType,
ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from the Sel.getNumArgs().
ObjCArgInfo *ArgInfo,
DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args
AttributeList *AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// \brief Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
///
/// Returned by getObjCMessageKind() below to steer parsing of the
/// message-send expression.
enum ObjCMessageKind {
  /// \brief The message is sent to 'super'.
  ObjCSuperMessage,
  /// \brief The message is an instance message.
  ObjCInstanceMessage,
  /// \brief The message is a class message, and the identifier is a type
  /// name.
  ObjCClassMessage
};
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr);
bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// \brief Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// \brief Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
  RTC_Compatible,   ///< The result type is compatible.
  RTC_Incompatible, ///< The result type is incompatible.
  RTC_Unknown       ///< Compatibility could not be determined.
};
/// Check \p ObjCMethod against the methods it overrides, propagating
/// result-type compatibility \p RTC — presumably the batch counterpart of
/// CheckObjCMethodOverride above; confirm against the definition.
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
                              ObjCInterfaceDecl *CurrentClass,
                              ResultTypeCompatibilityKind RTC);
/// Alignment modes accepted by '\#pragma options align=...'.
enum PragmaOptionsAlignKind {
  POAK_Native,  // #pragma options align=native
  POAK_Natural, // #pragma options align=natural
  POAK_Packed,  // #pragma options align=packed
  POAK_Power,   // #pragma options align=power
  POAK_Mac68k,  // #pragma options align=mac68k
  POAK_Reset    // #pragma options align=reset
};
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
                             SourceLocation PragmaLoc);
/// Forms of the '\#pragma pack' directive.
enum PragmaPackKind {
  PPK_Default, // #pragma pack([n])
  PPK_Show,    // #pragma pack(show), only supported by MSVC.
  PPK_Push,    // #pragma pack(push, [identifier], [n])
  PPK_Pop      // #pragma pack(pop, [identifier], [n])
};
/// States of the '\#pragma ms_struct' directive.
enum PragmaMSStructKind {
  PMSST_OFF, // #pragma ms_struct off
  PMSST_ON   // #pragma ms_struct on
};
/// Kinds of the Microsoft '\#pragma comment(kind, ...)' directive.
enum PragmaMSCommentKind {
  PCK_Unknown,
  PCK_Linker,   // #pragma comment(linker, ...)
  PCK_Lib,      // #pragma comment(lib, ...)
  PCK_Compiler, // #pragma comment(compiler, ...)
  PCK_ExeStr,   // #pragma comment(exestr, ...)
  PCK_User      // #pragma comment(user, ...)
};
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(PragmaPackKind Kind,
IdentifierInfo *Name,
Expr *Alignment,
SourceLocation PragmaLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc);
/// ActOnPragmaPackMatrix - Called on well formed \#pragma pack_matrix(...).
void ActOnPragmaPackMatrix(bool bRowMajor, SourceLocation PragmaLoc);
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(PragmaMSCommentKind Kind, StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// \brief Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaVtorDispKind Kind, SourceLocation PragmaLoc,
MSVtorDispAttr::Mode Value);
/// Segment kinds addressed by the MS section pragmas
/// (see ActOnPragmaMSSeg below: bss_seg/data_seg/const_seg/code_seg).
enum PragmaSectionKind {
  PSK_DataSeg,  // #pragma data_seg
  PSK_BSSSeg,   // #pragma bss_seg
  PSK_ConstSeg, // #pragma const_seg
  PSK_CodeSeg,  // #pragma code_seg
};
bool UnifySection(StringRef SectionName,
int SectionFlags,
DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// \brief Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// \brief Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// \brief Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(StringRef Name, StringRef Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT
void ActOnPragmaFPContract(tok::OnOffSwitch OOS);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
/// \brief Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// \brief Location of the currently active "\#pragma clang optimize off".
/// An invalid location means the pragma state is "on".
SourceLocation getOptimizeOffPragmaLocation() const { return OptimizeOffPragmaLocation; }
/// \brief Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// \brief Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
unsigned SpellingListIndex, bool IsPackExpansion);
void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T,
unsigned SpellingListIndex, bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE,
unsigned SpellingListIndex);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E,
unsigned SpellingListIndex);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads,
Expr *MinBlocks, unsigned SpellingListIndex);
// OpenMP directives and clauses.
private:
void *VarDataSharingAttributesStack;
/// \brief Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op,
OpenMPClauseKind CKind);
public:
/// \brief Checks if the specified variable is used in one of the private
/// clauses in OpenMP constructs.
bool IsOpenMPCapturedVar(VarDecl *VD);
/// \brief Checks if the specified variable is used in a private clause in
/// OpenMP constructs.
/// \param Level Relative level of nested OpenMP construct at which the check
/// is performed.
bool isOpenMPPrivateVar(VarDecl *VD, unsigned Level);
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// \brief Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// \brief Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// \brief End analysis of clauses.
void EndOpenMPClause();
/// \brief Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// \brief Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
// OpenMP declarative directives ('#pragma omp threadprivate').
/// \brief Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope,
CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id);
/// \brief Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// \brief Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// \brief Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// \brief End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// \brief Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind,
unsigned Argument, Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ArgumentLoc,
SourceLocation CommaLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(OpenMPScheduleClauseKind Kind,
Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation KindLoc,
SourceLocation CommaLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'ordered' clause.
OMPClause *ActOnOpenMPOrderedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId, OpenMPDependClauseKind DepKind,
SourceLocation DepLoc);
/// \brief Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'reduction' clause.
OMPClause *
ActOnOpenMPReductionClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc,
SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId);
/// \brief Called on well-formed 'linear' clause.
OMPClause *ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList,
Expr *Step,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief The kind of conversion being performed.
///
/// Used as the default-valued CCK parameter of ImpCastExprToType below to
/// record how a conversion was written in the source.
enum CheckedConversionKind {
  /// \brief An implicit conversion.
  CCK_ImplicitConversion,
  /// \brief A C-style cast.
  CCK_CStyleCast,
  /// \brief A functional-style cast.
  CCK_FunctionalCast,
  /// \brief A cast other than a C-style cast.
  CCK_OtherCast
};
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// If isLvalue, the result of the cast is an lvalue.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_RValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of a unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This is DefaultFunctionArrayLvalueConversion,
// except that it assumes the operand isn't of function or array
// type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
  VariadicFunction,    ///< Call to a variadic function.
  VariadicBlock,       ///< Call to a variadic block.
  VariadicMethod,      ///< Call to a variadic Objective-C method.
  VariadicConstructor, ///< Call to a variadic constructor.
  VariadicDoesNotApply ///< The call is not variadic.
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function: classifies how (or whether) a type may legally travel
// through the ellipsis (see isValidVarArgType / checkVariadicArgument).
enum VarArgKind {
  VAK_Valid,          // always valid to pass
  VAK_ValidInCXX11,   // valid only in C++11 and later
  VAK_Undefined,      // undefined behavior to pass
  VAK_MSVCUndefined,  // undefined, but tolerated in MSVC-compatibility mode
  VAK_Invalid         // never valid; an error
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collects argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
bool IsCompAssign = false);
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointer types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatiblePointerSign - The assignment is between two pointer types which
/// point to integers which have a different sign, but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointers types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright, it is invalid to
/// represent it in the AST.
Incompatible
};
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and prepare for a conversion of the
/// RHS to the LHS type.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind);
// CheckSingleAssignmentConstraints - Currently used by
// CheckAssignmentOperands, and ActOnReturnStmt. Prior to type checking,
// this routine performs the default function/array conversions.
AssignConvertType CheckSingleAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
bool Diagnose = true,
bool DiagnoseCFAudited = false);
// \brief If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit,
ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
/// the following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc,
QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc,
bool IsCompAssign = false);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned OpaqueOpc,
bool isRelational);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool *NonStandardCompositeType = nullptr);
/// Convenience overload of FindCompositePointerType for ExprResult operands.
/// Unwraps both expressions, delegates to the Expr*& overload (which may
/// rewrite either operand), then stores the possibly-updated expressions
/// back into the ExprResults before returning the composite type.
QualType FindCompositePointerType(SourceLocation Loc,
                                  ExprResult &E1, ExprResult &E2,
                                  bool *NonStandardCompositeType = nullptr) {
  Expr *LHS = E1.get();
  Expr *RHS = E2.get();
  QualType Result =
      FindCompositePointerType(Loc, LHS, RHS, NonStandardCompositeType);
  E1 = LHS;
  E2 = RHS;
  return Result;
}
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool isRelational);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);
// type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible_With_Added_Qualification - The two types are
/// reference-compatible with added qualification, meaning that
/// they are reference-compatible and the qualifiers on T1 (cv1)
/// are greater than the qualifiers on T2 (cv2).
Ref_Compatible_With_Added_Qualification,
/// Ref_Compatible - The two types are reference-compatible and
/// have equivalent qualifiers (cv1 == cv2).
Ref_Compatible
};
ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc,
QualType T1, QualType T2,
bool &DerivedToBase,
bool &ObjCConversion,
bool &ObjCLifetimeConversion);
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// \brief Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// \brief Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType ¶mType);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there are no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there are no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// returns the cast expr
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
enum ARCConversionResult { ACR_okay, ACR_unbridged };
/// \brief Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds.
ARCConversionResult CheckObjCARCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage,
SourceLocation lbrac, SourceLocation rbrac,
SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// \brief Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(QualType ReceiverType,
ObjCMethodDecl *Method,
bool isClassMessage, bool isSuperMessage);
/// \brief If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// \brief Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return true iff there were any errors
ExprResult CheckBooleanCondition(Expr *E, SourceLocation Loc);
ExprResult ActOnBooleanCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// \brief Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// \brief Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
  /// If true, suppress the diagnostics (callers can still detect failure
  /// from the result of VerifyIntegerConstantExpression).
  bool Suppress;

  VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }

  /// Emit the diagnostic for an expression that is not an integer constant
  /// expression. Must be provided by subclasses.
  virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0;
  /// Emit the diagnostic for an expression that is not a formal ICE but can
  /// be folded to a constant; non-pure, so a default implementation exists.
  virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
  virtual ~VerifyICEDiagnoser() { }
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns false on success.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr);
/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns false on success.
/// Can optionally return whether the bit-field is of width 0
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
/// Execution-space classification of a function under CUDA target
/// attributes (see IdentifyCUDATarget / CheckCUDATarget).
enum CUDAFunctionTarget {
  CFT_Device,        // __device__
  CFT_Global,        // __global__ (kernel)
  CFT_Host,          // __host__
  CFT_HostDevice,    // __host__ __device__
  CFT_InvalidTarget  // invalid combination of target attributes
};
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D);
bool CheckCUDATarget(const FunctionDecl *Caller, const FunctionDecl *Callee);
/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \name Code completion
//@{
/// \brief Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// \brief Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// \brief Code completion occurs within a class, struct, or union.
PCC_Class,
/// \brief Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// \brief Code completion occurs within an Objective-C implementation or
/// category implementation
PCC_ObjCImplementation,
/// \brief Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// \brief Code completion occurs following one or more template
/// headers.
PCC_Template,
/// \brief Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// \brief Code completion occurs within an expression.
PCC_Expression,
/// \brief Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// \brief Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// \brief Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// \brief Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// \brief Code completion occurs where only a type is permitted.
PCC_Type,
/// \brief Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// \brief Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool IsArrow);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteCase(Scope *S);
void CodeCompleteCall(Scope *S, Expr *Fn, ArrayRef<Expr *> Args);
void CodeCompleteConstructor(Scope *S, QualType Type, SourceLocation Loc,
ArrayRef<Expr *> Args);
void CodeCompleteInitializer(Scope *S, Decl *D);
void CodeCompleteReturn(Scope *S);
void CodeCompleteAfterIf(Scope *S);
void CodeCompleteAssignmentRHS(Scope *S, Expr *LHS);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(IdentifierLocPair *Protocols,
unsigned NumProtocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S,
bool IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteNaturalLanguage();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
// HLSL Change Starts - checking array subscript access to vector or matrix member
void CheckHLSLArrayAccess(const Expr *expr);
// HLSL Change ends
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
  unsigned FormatIdx;    // index of the format-string argument
  unsigned FirstDataArg; // index of the first data argument to check
  bool HasVAListArg;     // callee takes a va_list rather than `...`
};
bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto,
SourceLocation Loc);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
ArrayRef<const Expr *> Args, bool IsMemberFunction,
SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
bool CheckObjCString(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStart(CallExpr *TheCall);
bool SemaBuiltinVAStartARM(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum,
int Low, int High);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
bool SemaBuiltinCpuSupports(CallExpr *TheCall);
public:
  /// Family of format string a function accepts, derived from its format
  /// attribute (see GetFormatStringType).
  enum FormatStringType {
    FST_Scanf,          // scanf-style
    FST_Printf,         // printf-style
    FST_NSString,       // Objective-C NSString format
    FST_Strftime,       // strftime-style
    FST_Strfmon,        // strfmon-style
    FST_Kprintf,        // kernel printf
    FST_FreeBSDKPrintf, // FreeBSD kernel printf
    FST_OSTrace,        // os_trace format
    FST_Unknown
  };
static FormatStringType GetFormatStringType(const FormatAttr *Format);
void CheckFormatString(const StringLiteral *FExpr, const Expr *OrigFormatExpr,
ArrayRef<const Expr *> Args, bool HasVAListArg,
unsigned format_idx, unsigned firstDataArg,
FormatStringType Type, bool inFunctionCall,
VariadicCallType CallType,
llvm::SmallBitVector &CheckedVarArgs);
bool FormatStringHasSArg(const StringLiteral *FExpr);
bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl,
IdentifierInfo *FnInfo);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
void CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr* RHS);
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(Expr *E);
/// \brief Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// \brief Check if the given expression contains 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);
/// \brief Check whether receiver is mutable ObjC container which
/// attempts to add itself into the container
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// \brief Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
/// \brief Type information recorded for a registered type-tag magic value
/// (see RegisterTypeTagForDatatype above).
struct TypeTagData {
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
/// The type that an argument carrying this type tag is expected to have.
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
/// If true, the tagged expression is expected to be a null pointer
/// constant rather than an expression of \c Type.
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// \brief A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// \brief Peform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const Expr * const *ExprArgs);
/// \brief The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
// HLSL Change Starts
bool DiagnoseHLSLDecl(Declarator& D, DeclContext* DC, Expr *BitWidth, TypeSourceInfo* TInfo, bool isParameter);
bool DiagnoseHLSLLookup(const LookupResult &R);
void TransferUnusualAttributes(Declarator& D, NamedDecl* NewDecl);
// HLSL Change Ends
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// \brief Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
void incrementMSManglingNumber() const {
return CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
AvailabilityResult getCurContextAvailability() const;
const DeclContext *getCurObjCLexicalContext() const {
const DeclContext *DC = getCurLexicalContext();
// A category implicitly has the attribute of the interface.
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
DC = CatD->getClassInterface();
return DC;
}
/// \brief To be used for checking whether the arguments being passed to
/// function exceeds the number of parameters expected for it.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
bool PartialOverloading = false) {
// During code completion we may be sitting just after a comma, which
// stands for one extra argument the user is about to type.
const size_t EffectiveArgs =
(PartialOverloading && NumArgs > 0) ? NumArgs + 1 : NumArgs;
return EffectiveArgs > NumParams;
}
};
/// \brief RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
Sema &Actions;
public:
/// Push \p NewContext onto Sema's expression-evaluation-context stack.
/// \p LambdaContextDecl, if given, is passed through as the declaration
/// providing context for lambda expressions; \p IsDecltype marks the
/// operand of a decltype.
EnterExpressionEvaluationContext(Sema &Actions,
Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
bool IsDecltype = false)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
IsDecltype);
}
/// Tag-dispatched overload: pushes \p NewContext while re-using the
/// lambda context declaration (Sema::ReuseLambdaContextDecl).
EnterExpressionEvaluationContext(Sema &Actions,
Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
bool IsDecltype = false)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(NewContext,
Sema::ReuseLambdaContextDecl,
IsDecltype);
}
/// Pop the context that was pushed in the constructor.
~EnterExpressionEvaluationContext() {
Actions.PopExpressionEvaluationContext();
}
};
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// \brief Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
/// \brief The cached token stream of the function, to be re-parsed later.
CachedTokens Toks;
/// \brief The template function declaration to be late parsed.
Decl *D;
};
} // end namespace clang
#endif
|
hash_mult_hw.h | #ifndef _HASH_MULT_HW_
#define _HASH_MULT_HW_
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
// #include <immintrin.h>
//#include <zmmintrin.h>
#include <algorithm>
#include "utility.h"
#include "CSR.h"
#include "BIN.h"
/* SpGEMM Specific Parameters */
#define HASH_SCAL 107 // Set disjoint number to SH_SIZE
#define IMB_PWMIN 8
#define B_PWMIN 8
#define VEC_LENGTH 8
#define VEC_LENGTH_BIT 3
#define VEC_LENGTH_LONG 4
#define VEC_LENGTH_LONG_BIT 2
// Symbolic phase kernel of hash-based SpGEMM: for each row i of C = A*B,
// counts the number of distinct output column indices using a thread-local
// open-addressing hash table, and records the count in bin.row_nz[i].
// Rows are partitioned across threads via bin.rows_offset.
template <class IT, class NT>
inline void hash_symbolic_kernel(const IT *arpt, const IT *acol, const IT *brpt, const IT *bcol, BIN<IT, NT> &bin, unsigned threadCount)
{
#pragma omp parallel num_threads(threadCount)
{
IT i, tid, start_row, end_row;
tid = omp_get_thread_num();
// this thread owns rows [start_row, end_row)
start_row = bin.rows_offset[tid];
end_row = bin.rows_offset[tid + 1];
// thread-local hash table of column ids; -1 marks an empty slot
IT *check = bin.local_hash_table_id[tid];
for (i = start_row; i < end_row; ++i) {
IT j, k, bid;
IT key, hash, old; // NOTE(review): 'old' is declared but never used here
IT nz, SH_ROW;
IT t_acol;
nz = 0;
bid = bin.bin_id[i];
// bid == 0: nothing is hashed and row_nz[i] is recorded as 0
if (bid > 0) {
// table size is a power of two, so '& (SH_ROW - 1)' implements mod
SH_ROW = IMB_PWMIN << (bid - 1);
for (j = 0; j < SH_ROW; ++j) {
check[j] = -1;
}
// for each nonzero A(i, t_acol), scan row t_acol of B
for (j = arpt[i]; j < arpt[i + 1]; ++j) {
t_acol = acol[j];
for (k = brpt[t_acol]; k < brpt[t_acol + 1]; ++k) {
key = bcol[k];
hash = (key * HASH_SCAL) & (SH_ROW - 1);
// linear probing: a key is counted only on first insertion
while (1) {
if (check[hash] == key) {
break;
}
else if (check[hash] == -1) {
check[hash] = key;
nz++;
break;
}
else {
hash = (hash + 1) & (SH_ROW - 1); //hash = (hash + 1) % SH_ROW
}
}
}
}
}
bin.row_nz[i] = nz;
}
}
}
// Symbolic phase driver: computes the per-row output counts of C = A*B
// (via hash_symbolic_kernel), prefix-scans them into the row pointer crpt,
// and reports the total number of nonzeros of C through *nnz.
template <class IT, class NT>
inline void hash_symbolic(const IT *arpt, const IT *acol, const IT *brpt, const IT *bcol, IT *crpt, BIN<IT, NT> &bin, const IT nrow, IT *nnz, unsigned threadCount)
{
// (an unused local loop variable was removed here)
hash_symbolic_kernel(arpt, acol, brpt, bcol, bin, threadCount);
/* Set row pointer of matrix C */
scan(bin.row_nz, crpt, nrow + 1);
*nnz = crpt[nrow];
}
// Comparator for (column-id, value) pairs: strict ordering by the column id
// (the .first member); the stored values are ignored.
template <typename IT, typename NT>
bool sort_less(const pair<IT, NT> &left, const pair<IT, NT> &right)
{
const IT &lkey = left.first;
const IT &rkey = right.first;
return lkey < rkey;
}
// Numeric phase of hash-based SpGEMM: fills ccol/cval for C = A*B using the
// row pointer crpt produced by the symbolic phase. Each thread accumulates
// one row at a time into its thread-local hash table (ids + values) with
// multop as the multiply and addop as the accumulate. If sortOutput is
// true, each row is emitted sorted by column index; otherwise entries are
// emitted in hash-table order.
template <bool sortOutput, typename IT, typename NT, typename MultiplyOperation, typename AddOperation>
inline void hash_numeric(const IT *arpt, const IT *acol, const NT *aval, const IT *brpt, const IT *bcol, const NT *bval, const IT *crpt, IT *ccol, NT *cval,const BIN<IT, NT> &bin, const MultiplyOperation multop, const AddOperation addop, unsigned threadCount)
{
#pragma omp parallel num_threads(threadCount)
{
IT i, tid, start_row, end_row;
IT *shared_check;
NT *shared_value;
tid = omp_get_thread_num();
// this thread owns rows [start_row, end_row)
start_row = bin.rows_offset[tid];
end_row = bin.rows_offset[tid + 1];
// thread-local hash table: column ids (-1 == empty) and running values
shared_check = bin.local_hash_table_id[tid];
shared_value = bin.local_hash_table_val[tid];
for (i = start_row; i < end_row; ++i) {
IT j, k, bid, index;
IT SH_ROW;
IT t_acol, hash, key, offset;
NT t_aval, t_val;
bid = bin.bin_id[i];
// bid == 0: the symbolic phase found no entries in this row
if (bid > 0) {
offset = crpt[i];
// power-of-two table size; '& (SH_ROW - 1)' implements mod
SH_ROW = B_PWMIN << (bid - 1);
for (j = 0; j < SH_ROW; ++j) {
shared_check[j] = -1;
}
// accumulate A(i,:) * B into the hash table
for (j = arpt[i]; j < arpt[i + 1]; ++j) {
t_acol = acol[j];
t_aval = aval[j];
for (k = brpt[t_acol]; k < brpt[t_acol + 1]; ++k) {
t_val = multop(t_aval, bval[k]);
key = bcol[k];
hash = (key * HASH_SCAL) & (SH_ROW - 1);
// linear probing: combine on hit, insert on empty slot
while (1) {
if (shared_check[hash] == key) {
shared_value[hash] = addop(t_val, shared_value[hash]);
break;
}
else if (shared_check[hash] == -1) {
shared_check[hash] = key;
shared_value[hash] = t_val;
break;
}
else {
hash = (hash + 1) & (SH_ROW - 1);
}
}
}
}
// flush the hash table into C's storage for this row
index = 0;
if (sortOutput) {
// gather entries, sort by column index, then write out
IT nz = crpt[i + 1] - offset;
vector<pair<IT, NT>> p_vec(nz);
for (j = 0; j < SH_ROW; ++j) {
if (shared_check[j] != -1) {
p_vec[index++] = make_pair(shared_check[j], shared_value[j]);
}
}
sort(p_vec.begin(), p_vec.end(), sort_less<IT, NT>);
for (j = 0; j < index; ++j) {
ccol[offset + j] = p_vec[j].first;
cval[offset + j] = p_vec[j].second;
}
}
else {
// unsorted: write entries in hash-table slot order
for (j = 0; j < SH_ROW; ++j) {
if (shared_check[j] != -1) {
ccol[offset + index] = shared_check[j];
cval[offset + index] = shared_value[j];
index++;
}
}
}
}
}
}
}
// Hash-based SpGEMM: computes C = A * B on CSR matrices, using multop as
// the scalar multiply and addop as the scalar accumulate, with threadCount
// OpenMP threads. sortOutput selects whether each output row is sorted by
// column index. Allocates C's arrays (rowptr/colids/values) via my_malloc;
// the caller owns and must free them.
template <bool sortOutput, typename IT, typename NT, typename MultiplyOperation, typename AddOperation>
void HashSpGEMM(const CSR<IT, NT> &a, const CSR<IT, NT> &b, CSR<IT, NT> &c, MultiplyOperation multop, AddOperation addop, unsigned threadCount)
{
BIN<IT, NT> bin(a.rows, IMB_PWMIN, threadCount);
c.rows = a.rows;
c.cols = b.cols;
c.zerobased = true;
/* Set max bin */
bin.set_max_bin(a.rowptr, a.colids, b.rowptr, c.rows, c.cols);
/* Create hash table (thread local) */
bin.create_local_hash_table(c.cols);
/* Symbolic Phase */
// first pass: count entries per row, build c.rowptr, then size the outputs
c.rowptr = my_malloc<IT>(c.rows + 1);
hash_symbolic(a.rowptr, a.colids, b.rowptr, b.colids, c.rowptr, bin, c.rows, &(c.nnz), threadCount);
c.colids = my_malloc<IT>(c.nnz);
c.values = my_malloc<NT>(c.nnz);
// only non-vector case implemented
hash_numeric<sortOutput>(a.rowptr, a.colids, a.values, b.rowptr, b.colids, b.values, c.rowptr, c.colids, c.values, bin, multop, addop, threadCount);
}
#endif
|
GB_assign_zombie5.c | //------------------------------------------------------------------------------
// GB_assign_zombie5: delete entries in C for C_replace_phase
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// For GrB_Matrix_assign, C(I,J)<M,repl>=..., if C_replace is true, and mask M
// is present, then any entry C(i,j) outside IxJ must be be deleted, if
// M(i,j)=0.
// See also GB_assign_zombie3 and GB_assign_zombie4.
#include "GB_assign.h"
#include "GB_ek_slice.h"
#define GB_FREE_WORK \
GB_ek_slice_free (&pstart_slice, &kfirst_slice, &klast_slice, ntasks) ;
GrB_Info GB_assign_zombie5
(
GrB_Matrix Z, // the matrix C, or a copy
const GrB_Matrix M, // the mask matrix
const bool Mask_comp, // if true, the effective mask is !M
const GrB_Index *I, // index list I, described by Ikind/Icolon
const int64_t nI,
const int Ikind,
const int64_t Icolon [3],
const GrB_Index *J, // index list J, described by Jkind/Jcolon
const int64_t nJ,
const int Jkind,
const int64_t Jcolon [3],
GB_Context Context
)
{
//--------------------------------------------------------------------------
// get Z
//--------------------------------------------------------------------------
const int64_t *restrict Zh = Z->h ; // hyperlist (NULL if Z is standard)
const int64_t *restrict Zp = Z->p ;
// const int64_t Znvec = Z->nvec ;
int64_t *restrict Zi = Z->i ; // indices; a flipped index marks a zombie
int64_t nzombies = Z->nzombies ;
const int64_t znz = GB_NNZ (Z) ;
//--------------------------------------------------------------------------
// get M
//--------------------------------------------------------------------------
const int64_t *restrict Mh = M->h ;
const int64_t *restrict Mp = M->p ;
const int64_t *restrict Mi = M->i ;
const GB_void *restrict Mx = M->x ;
const size_t msize = M->type->size ;
// cast function that converts an M entry of any built-in type to bool
const GB_cast_function cast_M =
GB_cast_factory (GB_BOOL_code, M->type->code) ;
const int64_t Mnvec = M->nvec ;
const bool M_is_hyper = M->is_hyper ;
//--------------------------------------------------------------------------
// determine the number of threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = GB_nthreads (znz, chunk, nthreads_max) ;
// oversubscribe tasks (64 per thread) for the dynamic schedule below
int ntasks = (nthreads == 1) ? 1 : (64 * nthreads) ;
//--------------------------------------------------------------------------
// slice the entries for each task
//--------------------------------------------------------------------------
// Task tid does entries pstart_slice [tid] to pstart_slice [tid+1]-1 and
// vectors kfirst_slice [tid] to klast_slice [tid]. The first and last
// vectors may be shared with prior slices and subsequent slices.
int64_t *pstart_slice = NULL, *kfirst_slice = NULL, *klast_slice = NULL ;
if (!GB_ek_slice (&pstart_slice, &kfirst_slice, &klast_slice, Z, ntasks))
{
// out of memory
return (GB_OUT_OF_MEMORY) ;
}
//--------------------------------------------------------------------------
// each task creates its own zombies
//--------------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(+:nzombies)
for (int tid = 0 ; tid < ntasks ; tid++)
{
//----------------------------------------------------------------------
// get the task description
//----------------------------------------------------------------------
int64_t kfirst = kfirst_slice [tid] ;
int64_t klast = klast_slice [tid] ;
//----------------------------------------------------------------------
// scan vectors kfirst to klast for entries to delete
//----------------------------------------------------------------------
for (int64_t k = kfirst ; k <= klast ; k++)
{
//------------------------------------------------------------------
// get Z(:,j) and determine if j is outside the list J
//------------------------------------------------------------------
int64_t j = (Zh == NULL) ? k : Zh [k] ;
// j_outside is true if column j is outside the Z(I,J) submatrix
bool j_outside = !GB_ij_is_in_list (J, nJ, j, Jkind, Jcolon) ;
int64_t pZ_start, pZ_end ;
// restrict this task to its own slice of vector k
GB_get_pA_and_pC (&pZ_start, &pZ_end, NULL,
tid, k, kfirst, klast, pstart_slice, NULL, NULL, Zp) ;
//------------------------------------------------------------------
// get M(:,j)
//------------------------------------------------------------------
int64_t pM_start, pM_end ;
int64_t pleft = 0 ;
int64_t pright = Mnvec - 1 ;
GB_lookup (M_is_hyper, Mh, Mp, &pleft, pright, j,
&pM_start, &pM_end) ;
//------------------------------------------------------------------
// iterate over all entries in Z(:,j)
//------------------------------------------------------------------
for (int64_t pZ = pZ_start ; pZ < pZ_end ; pZ++)
{
//--------------------------------------------------------------
// consider Z(i,j)
//--------------------------------------------------------------
// Z(i,j) is outside the Z(I,J) submatrix if either i is
// not in the list I, or j is not in J, or both.
int64_t i = Zi [pZ] ;
if (!GB_IS_ZOMBIE (i) &&
(j_outside || !GB_ij_is_in_list (I, nI, i, Ikind, Icolon)))
{
//----------------------------------------------------------
// Z(i,j) is a live entry not in the Z(I,J) submatrix
//----------------------------------------------------------
// Check the mask M to see if it should be deleted.
// binary search for i in M(:,j)
int64_t pM = pM_start ;
int64_t pright = pM_end - 1 ;
bool found ;
GB_BINARY_SEARCH (i, Mi, pM, pright, found) ;
bool mij = false ;
if (found)
{
// found it
cast_M (&mij, Mx +(pM*msize), 0) ;
}
if (Mask_comp)
{
// negate the mask if Mask_comp is true
mij = !mij ;
}
if (!mij)
{
// delete Z(i,j) by marking it as a zombie
nzombies++ ;
Zi [pZ] = GB_FLIP (i) ;
}
}
}
}
}
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
Z->nzombies = nzombies ;
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
}
|
consecutive_write.c | #include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "constants.h"
/**
* Deinterleave (transpose) an IQUV ring buffer page to the ordering needed for FITS files
* Note that this is probably a slow function, and is not meant to be run real-time
*
* data in: tab, channel/4, time/500 packets of time,channel,pn
* data out: tab, channel, pol, time
*
* Suggested use is:
* 1. realtime: ringbuffer -> [trigger] -> dada_dbdisk
* 2. offline: dada_dbdisk -> ringbuffer -> dadafits
*
* @param {const char *} page Ringbuffer page with interleaved data
* @param {const char *} transposed
* @param {int} ntabs Number of tabs
* @param {int} nchannels Number of channels
* @param {int} npackets Number of packets per sequence
*/
void deinterleave (const unsigned char *page, unsigned char *transposed, const int ntabs, const int nchannels, const int npackets) {
  int tab = 0;
  for (tab = 0; tab < ntabs; tab++) {
    int channel_offset = 0;
    for (channel_offset = 0; channel_offset < nchannels; channel_offset+=4) {
      const unsigned char *src = &page[(tab * nchannels + channel_offset)*NPOLS*npackets*NSAMPS];
      unsigned char *dest = &transposed[(tab * nchannels + channel_offset)*NPOLS*npackets*NSAMPS];
      int pc;
      for (pc=0; pc < 16; pc+=4) {
        int tn = 0;
#pragma omp parallel for
        for (tn = 0; tn < npackets*NSAMPS; tn++) {
          /* read 4 consecutive interleaved bytes in one 32-bit load */
          unsigned int data = *((unsigned int *) &src[tn*NPOLS*NCHANS + pc]);
          /* BUG FIX: mask each byte with bitwise '&', not logical '&&' --
           * the logical operator collapsed every nonzero sample to 1. */
          dest[tn + (pc + 0) * npackets*NSAMPS] = (data >> 0) & 255;  // src[tn*NPOLS*NCHANS + pc + 0]
          dest[tn + (pc + 1) * npackets*NSAMPS] = (data >> 8) & 255;  // src[tn*NPOLS*NCHANS + pc + 1]
          dest[tn + (pc + 2) * npackets*NSAMPS] = (data >> 16) & 255; // src[tn*NPOLS*NCHANS + pc + 2]
          dest[tn + (pc + 3) * npackets*NSAMPS] = (data >> 24) & 255; // src[tn*NPOLS*NCHANS + pc + 3]
        }
      }
    } // channel_offset
  } // tab
}
/* Manually unrolling the loop over pc gives slight speedups; code below for reference.
 * NOTE(review): corrected from the original notes — byte extraction must use
 * bitwise '&' (mask), not logical '&&', and the final block must put the +12
 * offset inside the indexing expression, not after the cast.
 *
int tn = 0;
for (tn = 0; tn < npackets*NSAMPS; tn++) {
  unsigned int data = *((unsigned int *) &src[tn*NPOLS*NCHANS]);
  dest[tn + 0 * npackets*NSAMPS] = (data >> 0) & 255;  // src[tn*NPOLS*NCHANS + 0];
  dest[tn + 1 * npackets*NSAMPS] = (data >> 8) & 255;  // src[tn*NPOLS*NCHANS + 1];
  dest[tn + 2 * npackets*NSAMPS] = (data >> 16) & 255; // src[tn*NPOLS*NCHANS + 2];
  dest[tn + 3 * npackets*NSAMPS] = (data >> 24) & 255; // src[tn*NPOLS*NCHANS + 3];
}
for (tn = 0; tn < npackets*NSAMPS; tn++) {
  unsigned int data = *((unsigned int *) &src[tn*NPOLS*NCHANS + 4]);
  dest[tn + 4 * npackets*NSAMPS] = (data >> 0) & 255;  // src[tn*NPOLS*NCHANS + 4];
  dest[tn + 5 * npackets*NSAMPS] = (data >> 8) & 255;  // src[tn*NPOLS*NCHANS + 5];
  dest[tn + 6 * npackets*NSAMPS] = (data >> 16) & 255; // src[tn*NPOLS*NCHANS + 6];
  dest[tn + 7 * npackets*NSAMPS] = (data >> 24) & 255; // src[tn*NPOLS*NCHANS + 7];
};
for (tn = 0; tn < npackets*NSAMPS; tn++) {
  unsigned int data = *((unsigned int *) &src[tn*NPOLS*NCHANS + 8]);
  dest[tn + 8 * npackets*NSAMPS] = (data >> 0) & 255;   // src[tn*NPOLS*NCHANS + 8];
  dest[tn + 9 * npackets*NSAMPS] = (data >> 8) & 255;   // src[tn*NPOLS*NCHANS + 9];
  dest[tn + 10 * npackets*NSAMPS] = (data >> 16) & 255; // src[tn*NPOLS*NCHANS + 10];
  dest[tn + 11 * npackets*NSAMPS] = (data >> 24) & 255; // src[tn*NPOLS*NCHANS + 11];
};
for (tn = 0; tn < npackets*NSAMPS; tn++) {
  unsigned int data = *((unsigned int *) &src[tn*NPOLS*NCHANS + 12]);
  dest[tn + 12 * npackets*NSAMPS] = (data >> 0) & 255;  // src[tn*NPOLS*NCHANS + 12];
  dest[tn + 13 * npackets*NSAMPS] = (data >> 8) & 255;  // src[tn*NPOLS*NCHANS + 13];
  dest[tn + 14 * npackets*NSAMPS] = (data >> 16) & 255; // src[tn*NPOLS*NCHANS + 14];
  dest[tn + 15 * npackets*NSAMPS] = (data >> 24) & 255; // src[tn*NPOLS*NCHANS + 15];
}
*/
|
omt_solver.h | #pragma once
// std
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <vector>
#include <list>
#include <set>
#include <unistd.h> // sleep
// eigen
#include <Eigen/Dense>
// other headers
#include "global.h"
#include "data_engine.h" // todo: remove.
#include "geodesic/Xin_Wang.h"
// cluster: an integer label, a 2D centroid, and the indices (into the
// solver's target list) of the samples assigned to this cluster.
struct Cluster
{
int label = -1; // cluster id; -1 means "unassigned"
Eigen::Vector2d centroid; // 2D centroid of the cluster
vector<int> samples; // indices of the member targets
// Default: unassigned cluster centered at the origin.
Cluster()
{
label = -1;
centroid = Eigen::Vector2d(0, 0);
// (a freshly constructed vector is already empty; no clear() needed)
}
// Construct from an explicit label, centroid and member set.
Cluster(int l, Eigen::Vector2d p, vector<int> s)
{
label = l;
centroid = p;
samples = s;
}
// The previous hand-written copy constructor/assignment performed exactly
// member-wise copies, so the compiler-generated versions are equivalent
// (rule of zero); default them.
Cluster(const Cluster & other) = default;
Cluster & operator = (const Cluster & other) = default;
};
// omt solver: discrete solver that assigns targets (tasks) to sources
// (robots) inside a triangulated domain; m_result_s_t[s] holds the target
// indices assigned to source s (filled by SolveOMT).
class DiscreteSolver
{
// input: domain, cost metric, sources, targets.
// output: assignment from sources to targets.
DataEngine* m_p_de; // domain.
CDT m_domain; // domain.
DistanceMetric* m_metric; // metric.
//vector<double>(*CostFunc)(Eigen::Vector2d, vector<Eigen::Vector2d>); // metric. todo: remove.
std::vector<Eigen::Vector2d> m_sources; // robot coordinates.
std::vector<Eigen::Vector2d> m_targets; // task coordinates.
double m_delta; // param delta to constrain the compactness and capacity.
std::vector<std::vector<int>> m_result_s_t; // results.
public:
// constructor.
// Copies the sources/targets and the CDT domain; keeps non-owning
// pointers to the DataEngine and DistanceMetric, which must outlive
// this solver.
DiscreteSolver(DataEngine& de, CDT d,
DistanceMetric& dm,
//vector<double>(*cf)(Eigen::Vector2d, vector<Eigen::Vector2d>),
vector<Eigen::Vector2d> s, vector<Eigen::Vector2d> t, double delta)
{
// domain
m_p_de = &de;
// domain
this->m_domain = CDT(d);
// metric
m_metric = &dm;
// metric
//CostFunc = cf;
// sources
this->m_sources.clear();
for (int i = 0; i < s.size(); i++)
this->m_sources.push_back(s[i]);
// targets
this->m_targets.clear();
for (int i = 0; i < t.size(); i++)
this->m_targets.push_back(t[i]);
// delta
this->m_delta = delta;
// results: one (initially empty) target list per source
this->m_result_s_t.clear();
this->m_result_s_t.resize(this->m_sources.size());
// end.
return;
}
// destructor
~DiscreteSolver()
{
// todo: CDT need manually release?
// swap-with-temporary releases the vectors' capacity immediately
vector<Eigen::Vector2d>().swap(m_sources);
vector<Eigen::Vector2d>().swap(m_targets);
for (int i = 0; i < m_result_s_t.size(); i++)
vector<int>().swap(m_result_s_t[i]);
vector<vector<int>>().swap(m_result_s_t);
return;
}
// execute the solver; mode is forwarded to the clustering step.
bool SolveOMT(int mode = 0);
// results: returns a copy of the source -> targets assignment.
std::vector<std::vector<int>> getSolution(){ return m_result_s_t; }
// visualize domain
void CerrDomainCDT();
// visualize clusters (matplotlib commands on stderr)
void CerrClusters(std::vector<Cluster> clusters);
//private:
// cluster the targets around the sources
std::vector<Cluster> ClusterTargets(int mode = 0);
// match sources to clusters
std::vector<int> Match(std::vector<Cluster> clusters);
};
using namespace std;
// hungarian algorithm O(n^3)
//
// Solves the assignment problem on a square cost matrix: returns, for each
// source (row) r, the column assignment[r] chosen so that the total cost is
// minimized. Implemented as the classic reduce / cover / augment scheme with
// goto-labelled steps; `cost` is taken by value and mutated freely.
// Zeros are detected with the tolerance `cost(r,c) < 0.0001`.
vector<int> hungarian_algorithm(Eigen::MatrixXd cost)
{
/* cost matrix format
target_1 target_2 ... target_m
source_1
source_2
.
.
.
source_m
*/
// detail info
bool log = false;
// results
vector<int> assignment(cost.rows());
// Step 1: Subtract row minima
Step1:
{
for (int r = 0; r < cost.rows(); r++)
{
double minima = DBL_MAX;
for (int c = 0; c < cost.cols(); c++)
if (cost(r, c) < minima)
minima = cost(r, c);
for (int c = 0; c < cost.cols(); c++)
cost(r, c) -= minima;
}
if (log)
cerr << "Subtract row minima" << endl << cost << endl << endl;
}
// Step 2: Subtract column minima
Step2:
{
for (int c = 0; c < cost.cols(); c++)
{
double minima = DBL_MAX;
for (int r = 0; r < cost.rows(); r++)
if (cost(r, c) < minima)
minima = cost(r, c);
for (int r = 0; r < cost.rows(); r++)
cost(r, c) -= minima;
}
if (log)
cerr << "Subtract column minima" << endl << cost << endl << endl;
}
// Step 3: Cover all zeros with a minimum number of lines
// circle[r][c] marks an independent (assigned) zero; xxxxxx[r][c] marks a
// crossed-out zero. A row is "covered" when tick_rows[r] is false, a
// column when tick_cols[c] is true.
vector<bool> tick_rows(cost.rows());
vector<bool> tick_cols(cost.cols());
vector<vector<bool>> circle;
vector<vector<bool>> xxxxxx;
Step3:
circle.clear();
xxxxxx.clear();
{
circle.resize(cost.rows());
xxxxxx.resize(cost.rows());
for (int r = 0; r < cost.rows(); r++)
{
circle[r].resize(cost.cols());
xxxxxx[r].resize(cost.cols());
for (int c = 0; c < cost.cols(); c++)
{
circle[r][c] = false;
xxxxxx[r][c] = false;
}
}
}
Check3:
tick_rows.clear();
tick_rows.resize(cost.rows());
tick_cols.clear();
tick_cols.resize(cost.cols());
{
// count num of '0'
vector<int> num_rows_0(cost.rows());
vector<int> num_cols_0(cost.cols());
{
for (int r = 0; r < num_rows_0.size(); r++)
num_rows_0[r] = 0;
for (int c = 0; c < num_cols_0.size(); c++)
num_cols_0[c] = 0;
for (int r = 0; r < cost.rows(); r++)
{
for (int c = 0; c < cost.cols(); c++)
{
if (cost(r, c) < 0.0001)
{
num_rows_0[r]++;
num_cols_0[c]++;
}
}
}
}
// label '0' by circle or xxxxxx
{
// labeling
int circle_num = 0;
int xxxxxx_num = 0;
int circle_crt = 0;
int xxxxxx_crt = 0;
labeling:
circle_crt = 0;
xxxxxx_crt = 0;
// a row (or column) with exactly one un-crossed zero: circle that zero
// and cross every other zero sharing its column (or row)
for (int r = 0; r < cost.rows(); r++)
{
int count = 0;
int index = -1;
for (int c = 0; c < cost.cols(); c++)
//if (cost(r, c) < 0.0001)
if (cost(r, c) < 0.0001 && !xxxxxx[r][c])
{
count++;
index = c;
}
if (count == 1)
{
circle[r][index] = true;
for (int tr = 0; tr < cost.rows(); tr++)
if (cost(tr, index) < 0.0001 && !circle[tr][index])
xxxxxx[tr][index] = true;
}
}
for (int c = 0; c < cost.cols(); c++)
{
int count = 0;
int index = -1;
for (int r = 0; r < cost.rows(); r++)
if (cost(r, c) < 0.0001 && !xxxxxx[r][c])
{
count++;
index = r;
}
if (count == 1)
{
circle[index][c] = true;
for (int tc = 0; tc < cost.cols(); tc++)
if (cost(index, tc) < 0.0001 && !circle[index][tc])
xxxxxx[index][tc] = true;
}
}
// update crt
for (int r = 0; r < cost.rows(); r++)
{
for (int c = 0; c < cost.cols(); c++)
{
if (circle[r][c])
circle_crt++;
if (xxxxxx[r][c])
xxxxxx_crt++;
}
}
// crt vs. num
// iterate until the circle/cross labelling reaches a fixed point
if (circle_crt == circle_num && xxxxxx_crt == xxxxxx_num)
{
}
else
{
circle_num = circle_crt;
xxxxxx_num = xxxxxx_crt;
goto labeling;
}
}
// check if left '0' unlabeled
Check:
{
for (int r = 0; r < cost.rows(); r++)
{
for (int c = 0; c < cost.cols(); c++)
{
if (!circle[r][c] && !xxxxxx[r][c] && cost(r, c) < 0.0001)
{
circle[r][c] = true;
for (int tr = 0; tr < cost.rows(); tr++)
{
if (cost(tr, c) < 0.0001
&& !circle[tr][c] && !xxxxxx[tr][c])
{
xxxxxx[tr][c] = true;
}
}
for (int tc = 0; tc < cost.cols(); tc++)
{
if (cost(r, tc) < 0.0001
&& !circle[r][tc] && !xxxxxx[r][tc])
{
xxxxxx[r][tc] = true;
}
}
goto Check3;
}
}
}
}
// line cover
{
// tick
{
// init
for (int r = 0; r < tick_rows.size(); r++)
tick_rows[r] = false;
for (int c = 0; c < tick_cols.size(); c++)
tick_cols[c] = false;
// tick step 1: tick every row that has no circled (assigned) zero
for (int r = 0; r < cost.rows(); r++)
{
bool exist = false;
for (int c = 0; c < cost.cols(); c++)
if (circle[r][c])
exist = true;
if (!exist)
{
tick_rows[r] = true;
}
}
// loop
int tick_num = 0;
for (int r = 0; r < tick_rows.size(); r++)
if (tick_rows[r])
tick_num++;
for (int c = 0; c < tick_cols.size(); c++)
if (tick_cols[c])
tick_num++;
int crt_num = 0;
while (crt_num != tick_num)
{
tick_num = crt_num;
// tick step 2: tick columns holding a crossed zero in a ticked row
for (int r = 0; r < cost.rows(); r++)
{
if (tick_rows[r])
{
for (int c = 0; c < cost.cols(); c++)
{
if (cost(r, c) < 0.0001 && xxxxxx[r][c])
{
tick_cols[c] = true;
}
}
}
}
// tick step 3: tick rows holding a circled zero in a ticked column
for (int c = 0; c < cost.cols(); c++)
{
if (tick_cols[c])
{
for (int r = 0; r < cost.rows(); r++)
{
if (cost(r, c) < 0.0001 && circle[r][c])
{
tick_rows[r] = true;
}
}
}
}
// update crt_num
crt_num = 0;
for (int r = 0; r < tick_rows.size(); r++)
if (tick_rows[r])
crt_num++;
for (int c = 0; c < tick_cols.size(); c++)
if (tick_cols[c])
crt_num++;
}
}
// check
if (log)
{
Eigen::MatrixXd lmat(cost.rows(), cost.cols());
for (int r = 0; r < lmat.rows(); r++)
for (int c = 0; c < lmat.cols(); c++)
lmat(r, c) = 0;
for (int r = 0; r < cost.rows(); r++)
if (!tick_rows[r])
for (int c = 0; c < cost.cols(); c++)
lmat(r, c) = 1;
for (int c = 0; c < cost.cols(); c++)
if (tick_cols[c])
for (int r = 0; r < cost.rows(); r++)
lmat(r, c) = 1;
cerr << "Cover all zeros with a minimum number of lines" << endl
<< lmat << endl << endl;
}
// check number of line cover
// cover = unticked rows + ticked columns; n lines ==> optimal cover found
{
int line_num = 0;
for (int r = 0; r < cost.rows(); r++)
if (!tick_rows[r])
{
line_num++;
}
for (int c = 0; c < cost.cols(); c++)
if (tick_cols[c])
{
line_num++;
}
if (line_num == cost.rows())
goto Final;
else
goto Step4;
}
}
}
// Step 4: Create additional zeros
Step4:
{
// line covered
vector<vector<bool>> covered;
// init
covered.resize(cost.rows());
for (int r = 0; r < cost.rows(); r++)
{
covered[r].resize(cost.cols());
for (int c = 0; c < cost.cols(); c++)
{
covered[r][c] = false;
}
}
// check number of line cover
{
for (int r = 0; r < cost.rows(); r++)
if (!tick_rows[r])
{
// line cover
for (int c = 0; c < cost.cols(); c++)
{
covered[r][c] = true;
}
}
for (int c = 0; c < cost.cols(); c++)
if (tick_cols[c])
{
// line cover
for (int r = 0; r < cost.rows(); r++)
{
covered[r][c] = true;
}
}
}
// minima of uncovered
{
double minima = DBL_MAX;
for (int r = 0; r < cost.rows(); r++)
{
for (int c = 0; c < cost.cols(); c++)
{
if (!covered[r][c])
{
if (cost(r, c) < minima)
{
minima = cost(r, c);
}
}
}
}
// plus and minus
// subtract the minimum from every uncovered entry; add it to entries
// covered by two lines (unticked row AND ticked column)
{
for (int r = 0; r < cost.rows(); r++)
{
for (int c = 0; c < cost.cols(); c++)
{
if (!covered[r][c])
{
cost(r, c) -= minima;
}
else
{
if (!tick_rows[r] && tick_cols[c])
cost(r, c) += minima;
}
}
}
}
}
if (log)
cerr << "Create additional zeros" << endl << cost << endl << endl;
goto Step3;
}
// final step
Final:
{
// save assignment
for (int r = 0; r < cost.rows(); r++)
{
for (int c = 0; c < cost.cols(); c++)
{
if (circle[r][c])
{
assignment[r] = c;
}
}
}
// log
if (log)
{
Eigen::MatrixXd opt(cost.rows(), cost.cols());
for (int r = 0; r < opt.rows(); r++)
for (int c = 0; c < opt.cols(); c++)
opt(r, c) = 0;
for (int r = 0; r < cost.rows(); r++)
for (int c = 0; c < cost.cols(); c++)
if (circle[r][c])
opt(r, c) = 1;
cerr << "result" << endl << opt << endl << endl;
}
}
return assignment;
}
// visualize cdt: prints matplotlib (pyplot) commands to stderr that fill
// every finite face of the constrained Delaunay triangulation marked as
// in-domain, as light-gray triangles.
void DiscreteSolver::CerrDomainCDT()
{
// cdt
for (auto it = m_domain.finite_faces_begin(); it != m_domain.finite_faces_end(); it++)
{
if (it->info().in_domain())
{
////
//cerr << "plt.plot([" << it->vertex(0)->point().x() << ", " << it->vertex(1)->point().x() << "], ["
// << it->vertex(0)->point().y() << ", " << it->vertex(1)->point().y() << "], 'k-')" << endl;
//cerr << "plt.plot([" << it->vertex(1)->point().x() << ", " << it->vertex(2)->point().x() << "], ["
// << it->vertex(1)->point().y() << ", " << it->vertex(2)->point().y() << "], 'k-')" << endl;
//cerr << "plt.plot([" << it->vertex(2)->point().x() << ", " << it->vertex(0)->point().x() << "], ["
// << it->vertex(2)->point().y() << ", " << it->vertex(0)->point().y() << "], 'k-')" << endl;
//
// fill the triangle spanned by vertices 0,1,2 with light gray
cerr << "plt.fill([" << it->vertex(0)->point().x() << ", " << it->vertex(1)->point().x() << ", " << it->vertex(2)->point().x()
<< "], [" << it->vertex(0)->point().y() << ", " << it->vertex(1)->point().y() << ", " << it->vertex(2)->point().y()
<< "], color=[0.7, 0.7, 0.7])" << endl;
}
}
// end
return;
}
// visualize clusters: prints matplotlib (pyplot) commands to stderr that
// plot each cluster's sample targets and its centroid ('x' marker), using
// one random RGB color per cluster.
void DiscreteSolver::CerrClusters(vector<Cluster> clusters)
{
for (int cid = 0; cid < clusters.size(); cid++)
{
// one random RGB color for this cluster
double r = (double)rand() / RAND_MAX;
double g = (double)rand() / RAND_MAX;
double b = (double)rand() / RAND_MAX;
// samples
for (int sid = 0; sid < clusters[cid].samples.size(); sid++)
{
int tid = clusters[cid].samples[sid];
char sentence[200];
// snprintf bounds the write to the buffer (sprintf does not)
snprintf(sentence, sizeof(sentence), "plt.plot(%f, %f, color=[%f, %f, %f])",
m_targets[tid].x(), m_targets[tid].y(), r, g, b);
cerr << sentence << endl;
}
// centroid
char sentence[200];
snprintf(sentence, sizeof(sentence), "plt.plot(%f, %f, marker='x', color=[%f, %f, %f])",
clusters[cid].centroid.x(), clusters[cid].centroid.y(), r, g, b);
cerr << sentence << endl;
}
return;
}
// cluster targets
// Lloyd-style (k-means-like) clustering of m_targets under the solver's
// geodesic metric, seeded with one cluster per source position.
// Each iteration: (1) assign every target to its nearest centroid,
// (2) recompute centroids and snap any that fall outside the mapped free
// space back onto the closest domain vertex, (3) split clusters whose
// farthest member exceeds compactParam into a new singleton cluster,
// (4) merge nearby singleton clusters while more clusters than sources
// remain, (5) drop empty clusters and test centroid convergence.
// `mode` is accepted for interface compatibility but not consulted here.
// Fixes: the per-iteration distance matrix is now an exactly-sized RAII
// vector<vector<double>> instead of a raw new[]/delete[] matrix of
// (max_iter_times * m_targets.size())^2 doubles; the old null checks on the
// result of a throwing `operator new` were dead code (new never returns 0,
// it throws std::bad_alloc), and the raw matrix leaked on any exception.
// Dead commented-out legacy code was removed; the unused local `nMerge` too.
vector<Cluster> DiscreteSolver::ClusterTargets(int mode)
{
    // timing
    double t_beg = clock();
    // set up params
    int max_iter_times = 10;
    // initialization: one cluster per source, centered at that source
    vector<Cluster> clusters;
    clusters.resize(m_sources.size());
    for (int idx = 0; idx < clusters.size(); idx++)
        clusters[idx].centroid = m_sources[idx];
    // iteration begin
    for (int it = 0; it < max_iter_times; it++)
    {
        cerr << "cluster iteration = " << it << endl;
        // record last centroids for the convergence test at the bottom
        vector<Eigen::Vector2d> last_centroids(clusters.size());
        for (int cid = 0; cid < clusters.size(); cid++)
            last_centroids[cid] = Eigen::Vector2d(clusters[cid].centroid.x(), clusters[cid].centroid.y());
        // clear samples
        for (int cid = 0; cid < clusters.size(); cid++)
            clusters[cid].samples.clear();
        // compute label for each sample
        {
            // distance matrix: tid_cid_d[tid][cid] = geodesic distance from
            // target tid to centroid cid; sized exactly, freed automatically.
            vector<vector<double>> tid_cid_d(m_targets.size(), vector<double>(clusters.size(), 0.0));
            // compute distances, one centroid per OpenMP iteration; each
            // iteration writes a disjoint column, so no locking is needed
            #pragma omp parallel for num_threads(omp_get_num_procs())
            for (int cid = 0; cid < clusters.size(); cid++)
            {
                vector<double> cDistances = m_metric->GeodesicDistances(clusters[cid].centroid, m_targets); // compute distance
                for (int tid = 0; tid < m_targets.size(); tid++)
                    tid_cid_d[tid][cid] = cDistances[tid];
            }
            // label each target with its nearest centroid
            for (int tid = 0; tid < m_targets.size(); tid++)
            {
                int c = 0;
                double min_distance = DBL_MAX;
                for (int cid = 0; cid < clusters.size(); cid++)
                {
                    double distance = tid_cid_d[tid][cid];
                    if (distance < min_distance)
                    {
                        min_distance = distance;
                        c = cid;
                    }
                }
                clusters[c].samples.push_back(tid);
            }
        }
        // compute centroids
        #pragma omp parallel for num_threads(omp_get_num_procs())
        for (int cid = 0; cid < clusters.size(); cid++)
        {
            // empty cluster: park its centroid far away
            if (clusters[cid].samples.empty())
            {
                clusters[cid].centroid = Eigen::Vector2d(999999, 999999);
                continue;
            }
            // centroid = Euclidean mean of the member targets
            Eigen::Vector2d val(0, 0);
            for (int sid = 0; sid < clusters[cid].samples.size(); sid++)
            {
                int sample = clusters[cid].samples[sid];
                val = Eigen::Vector2d(val.x() + m_targets[sample].x(), val.y() + m_targets[sample].y());
            }
            clusters[cid].centroid = Eigen::Vector2d(val.x() / clusters[cid].samples.size(), val.y() / clusters[cid].samples.size());
            // keep the centroid inside known free space; cellmap rows appear
            // to grow with -y (NOTE(review): confirm the cellmap orientation)
            {
                int r = (int)round(-clusters[cid].centroid.y());
                int c = (int)round(clusters[cid].centroid.x());
                if (m_p_de->m_recon2D.m_cellmap[r][c].isScanned && m_p_de->m_recon2D.m_cellmap[r][c].isFree)
                {
                    ; // centroid already lies in scanned free space
                }
                else
                {
                    // replace with the closest point in domain
                    double min_d = DBL_MAX;
                    Eigen::Vector2d min_p;
                    for (auto it2 = m_domain.vertices_begin(); it2 != m_domain.vertices_end(); it2++)
                    {
                        Eigen::Vector2d p(it2->point().x(), it2->point().y());
                        double d = (clusters[cid].centroid - p).norm();
                        if (d < min_d)
                        {
                            min_d = d;
                            min_p = Eigen::Vector2d(p);
                        }
                    }
                    clusters[cid].centroid = min_p;
                }
            }
        }
        // split (compactness & capacity): a member farther than compactParam
        // from its centroid seeds a new singleton cluster
        for (int cid = 0; cid < clusters.size(); cid++) // for each cluster, check inner distance
        {
            if (clusters[cid].samples.size() <= 1) continue;
            // compute inner distances from the centroid to each member
            vector<Eigen::Vector2d> targets;
            for (int tid = 0; tid < clusters[cid].samples.size(); tid++)
                targets.push_back(Eigen::Vector2d(m_targets[clusters[cid].samples[tid]].x(), m_targets[clusters[cid].samples[tid]].y()));
            vector<double> inDistances = m_metric->GeodesicDistances(clusters[cid].centroid, targets);
            // max distance element
            int maxID = -1;
            double maxDistance = -1;
            for (int i = 0; i < inDistances.size(); i++)
            {
                if (maxDistance < inDistances[i])
                {
                    maxDistance = inDistances[i];
                    maxID = i;
                }
            }
            if (maxID == -1)
            {
                cerr << "error: maxID == -1." << endl;
                getchar(); getchar(); getchar();
            }
            if (maxID >= clusters[cid].samples.size() || maxID >= m_targets.size())
            {
                cerr << "error: maxID out bound." << endl;
                cerr << "maxID = " << maxID << endl;
                cerr << "cluster samples num = " << clusters[cid].samples.size() << endl;
                cerr << "targets num = " << m_targets.size() << endl;
                getchar(); getchar(); getchar();
            }
            // if need split
            if (maxDistance > compactParam)
            {
                // insert a new cluster holding just the outlier
                Cluster new_cluster;
                new_cluster.label = -1; // todo: id.
                new_cluster.samples.push_back(clusters[cid].samples[maxID]);
                new_cluster.centroid = Eigen::Vector2d(m_targets[clusters[cid].samples[maxID]].x(), m_targets[clusters[cid].samples[maxID]].y());
                clusters.push_back(new_cluster);
                // erase the sample from the origin cluster
                clusters[cid].samples.erase(clusters[cid].samples.begin() + maxID);
            }
        }
        // merge: while there are more non-empty clusters than sources, fold
        // singleton clusters into nearby clusters closer than compactParam
        int valid_cluster_num = 0;
        for (int cid = 0; cid < clusters.size(); cid++)
        {
            if (clusters[cid].samples.empty())
                continue;
            valid_cluster_num++;
        }
        if (valid_cluster_num <= m_sources.size()) // already few enough clusters
        {
        }
        else
        {
            for (int cid = 0; cid < clusters.size() - 1; cid++)
            {
                if (clusters[cid].samples.empty()) continue;
                for (int oid = cid + 1; oid < clusters.size(); oid++)
                {
                    if (clusters[oid].samples.empty()) continue;
                    // compute centroid-to-centroid distance
                    Point_2 p1(clusters[cid].centroid.x(), clusters[cid].centroid.y());
                    Point_2 p2(clusters[oid].centroid.x(), clusters[oid].centroid.y());
                    double outerD = m_metric->get_geodesic_distance_fast(p1, p2); // todo: change it to func pointer.
                    if (outerD < compactParam) // need to merge
                    {
                        if (clusters[oid].samples.size() == 1) // only singletons are folded in
                        {
                            clusters[cid].samples.push_back(clusters[oid].samples[0]);
                            clusters[oid].samples.clear();
                        }
                        else if (clusters[cid].samples.size() == 1)
                        {
                            clusters[oid].samples.push_back(clusters[cid].samples[0]);
                            clusters[cid].samples.clear();
                        }
                        // NOTE(review): decremented even when neither cluster
                        // is a singleton (no merge happened) — preserved from
                        // the original; confirm whether that is intended.
                        valid_cluster_num--;
                    }
                }
            }
        }
        // delete empty clusters
        {
            vector<Cluster> temp_clusters;
            for (int cid = 0; cid < clusters.size(); cid++)
            {
                if (!clusters[cid].samples.empty())
                {
                    temp_clusters.push_back(Cluster(clusters[cid]));
                }
            }
            vector<Cluster>().swap(clusters);
            clusters = temp_clusters;
        }
        // update centroids (same mean + snap-to-domain rule as above, serial)
        {
            for (int cid = 0; cid < clusters.size(); cid++)
            {
                if (clusters[cid].samples.empty())
                {
                    clusters[cid].centroid = Eigen::Vector2d(999999, 999999);
                    continue;
                }
                // update centroid
                Eigen::Vector2d val(0, 0);
                for (int sid = 0; sid < clusters[cid].samples.size(); sid++)
                {
                    int sample = clusters[cid].samples[sid];
                    val = Eigen::Vector2d(val.x() + m_targets[sample].x(), val.y() + m_targets[sample].y());
                }
                clusters[cid].centroid = Eigen::Vector2d(val.x() / clusters[cid].samples.size(), val.y() / clusters[cid].samples.size());
                // snap to free space if the mean left the mapped domain
                {
                    int r = (int)round(-clusters[cid].centroid.y());
                    int c = (int)round(clusters[cid].centroid.x());
                    if (m_p_de->m_recon2D.m_cellmap[r][c].isScanned && m_p_de->m_recon2D.m_cellmap[r][c].isFree)
                    {
                        ; // centroid already lies in scanned free space
                    }
                    else
                    {
                        // replace with the closest point in domain
                        double min_d = DBL_MAX;
                        Eigen::Vector2d min_p;
                        for (auto it2 = m_domain.vertices_begin(); it2 != m_domain.vertices_end(); it2++)
                        {
                            Eigen::Vector2d p(it2->point().x(), it2->point().y());
                            double d = (clusters[cid].centroid - p).norm();
                            if (d < min_d)
                            {
                                min_d = d;
                                min_p = Eigen::Vector2d(p);
                            }
                        }
                        clusters[cid].centroid = min_p;
                    }
                }
            }
        }
        // iteration difference: converged when the total centroid movement
        // (only comparable if the cluster count did not change) is tiny
        {
            bool terminate = false;
            if (last_centroids.size() == clusters.size())
            {
                double diff = 0;
                for (int cid = 0; cid < clusters.size(); cid++)
                {
                    diff += (last_centroids[cid] - clusters[cid].centroid).norm(); // todo: func pointer.
                    if (diff > 0.001)
                    {
                        break;
                    }
                }
                if (diff < 0.001) terminate = true;
            }
            // terminate
            if (terminate)
            {
                cerr << "converged at iter " << it << "." << endl;
                break;
            }
        }
    }
    // delete empty clusters (final pass)
    {
        vector<Cluster> temp_clusters;
        for (int cid = 0; cid < clusters.size(); cid++)
        {
            if (!clusters[cid].samples.empty())
            {
                temp_clusters.push_back(Cluster(clusters[cid]));
            }
        }
        vector<Cluster>().swap(clusters);
        clusters = temp_clusters;
    }
    // timing
    double t_end = clock();
    cerr << "DiscreteSolver cluster timing " << (t_end - t_beg)/CLOCKS_PER_SEC << " s" << endl;
    // end.
    return clusters;
}
// match
// Assign each source exactly one cluster (an index into `clusters`) so the
// total source-to-centroid geodesic distance is minimized, by running the
// Hungarian algorithm on a square, zero-padded cost matrix.
// Returns matches[sid] = cluster index for source sid, or -1 when there are
// fewer clusters than sources and this source receives none.
vector<int> DiscreteSolver::Match(vector<Cluster> clusters)
{
    //// test hungarian
    //{
    //    //// example#1
    //    //Eigen::MatrixXd cost(4, 4);
    //    //cost << 82, 83, 69, 92,
    //    //    77, 37, 49, 92,
    //    //    11, 69, 5, 86,
    //    //    8, 9, 98, 23;
    //    //// example#2
    //    //Eigen::MatrixXd cost(5, 5);
    //    //cost << 5, 0, 2, 0, 2,
    //    //    2, 3, 0, 0, 0,
    //    //    0, 10, 5, 7, 2,
    //    //    9, 8, 0, 0, 4,
    //    //    0, 6, 3, 6, 5;
    //    // example#3
    //    Eigen::MatrixXd cost(10, 10);
    //    cost << 7, 54, 42, 4, 84, 24, 53, 80, 61, 14,
    //        43, 30, 24, 65, 95, 9, 1, 87, 24, 28,
    //        63, 23, 61, 68, 33, 37, 53, 45, 80, 44,
    //        78, 99, 4, 9, 81, 65, 37, 92, 98, 85,
    //        61, 15, 82, 85, 89, 11, 2, 58, 5, 24,
    //        41, 39, 29, 45, 55, 19, 36, 95, 31, 51,
    //        31, 98, 73, 19, 18, 66, 65, 23, 40, 8,
    //        56, 37, 10, 49, 56, 64, 29, 21, 45, 90,
    //        15, 81, 24, 33, 76, 3, 44, 54, 97, 70,
    //        83, 4, 85, 25, 15, 9, 4, 83, 58, 94;
    //    // test the case
    //    cerr << "cost" << endl << cost << endl;
    //    hungarian_algorithm(cost);
    //}
    // init: collect the centroids; default every source to its own index
    vector<Eigen::Vector2d> centroids(clusters.size());
    for (int cid = 0; cid < clusters.size(); cid++)
        centroids[cid] = clusters[cid].centroid;
    vector<int> matches(m_sources.size());
    for (int sid = 0; sid < m_sources.size(); sid++)
        matches[sid] = sid;
    // compute distances: d_s_c[s][c] = geodesic distance source s -> centroid c
    vector<vector<double>> d_s_c(m_sources.size());
    for (int s = 0; s < d_s_c.size(); s++)
        //d_s_c[s] = CostFunc(m_sources[s], centroids);
        d_s_c[s] = m_metric->GeodesicDistances(m_sources[s], centroids);
    // tune
    {
        // replaced by hungarian.
    }
    // match: pad the cost matrix to square with zero rows/columns so the
    // Hungarian algorithm applies even when #sources != #centroids
    int dim = max(m_sources.size(), centroids.size());
    Eigen::MatrixXd cost(dim, dim);
    if (m_sources.size() < centroids.size())
    {
        // more clusters than sources: pad with dummy source rows (cost 0)
        for (int r = 0; r < dim; r++)
            for (int c = 0; c < dim; c++)
            {
                if (r >= m_sources.size())
                    cost(r, c) = 0.0;
                else
                    cost(r, c) = d_s_c[r][c];
            }
        // match by hungarian algorithm
        //cerr << "cost" << endl << cost << endl;
        vector<int> assignment = hungarian_algorithm(cost);
        // final match: keep only the real sources' assignments
        for (int r = 0; r < m_sources.size(); r++)
            matches[r] = assignment[r];
    }
    else if (m_sources.size() == centroids.size())
    {
        // square case: the assignment is used verbatim
        for (int r = 0; r < dim; r++)
            for (int c = 0; c < dim; c++)
                cost(r, c) = d_s_c[r][c];
        // match by hungarian algorithm
        //cerr << "cost" << endl << cost << endl;
        matches = hungarian_algorithm(cost);
    }
    else
    {
        // more sources than clusters: pad with dummy centroid columns (cost 0)
        for (int r = 0; r < dim; r++)
            for (int c = 0; c < dim; c++)
            {
                if (c >= centroids.size())
                    cost(r, c) = 0.0;
                else
                    cost(r, c) = d_s_c[r][c];
            }
        // match by hungarian algorithm
        //cerr << "cost" << endl << cost << endl;
        vector<int> assignment = hungarian_algorithm(cost);
        // final match: a dummy-centroid assignment means "no cluster" (-1)
        for (int r = 0; r < m_sources.size(); r++)
        {
            if (assignment[r] >= centroids.size())
                matches[r] = -1;
            else
                matches[r] = assignment[r];
        }
    }
    //// test. checked correct.
    //{
    //    for (int sid = 0; sid < matches.size(); sid++)
    //    {
    //        cerr << "source " << sid << " assigned target " << matches[sid] << endl;
    //    }
    //    //cerr << "con?" << endl;
    //    //getchar();
    //}
    // end.
    return matches;
}
// solve
// Top-level discrete OMT solve: cluster the targets (Lloyd-style, objective
// terms 1 & 3), match sources to clusters one-to-one (Hungarian, term 2),
// then expand each matched cluster into m_result_s_t[sid]. Always true.
bool DiscreteSolver::SolveOMT(int mode)
{
    // Drop any previous assignment, releasing held capacity as well.
    for (auto &row : m_result_s_t)
        vector<int>().swap(row);
    vector<vector<int>>().swap(m_result_s_t);
    m_result_s_t.resize(m_sources.size());
    // Stage 1: k-means-like clustering of the targets.
    vector<Cluster> clusters = ClusterTargets(mode);
    // Stage 2: one-to-one source/cluster matching.
    vector<int> matches = Match(clusters);
    // Stage 3: each source inherits every target of its matched cluster;
    // a match of -1 means the source received no cluster at all.
    for (int sid = 0; sid < m_sources.size(); sid++)
    {
        int cid = matches[sid];
        if (cid == -1)
            continue;
        for (int tid : clusters[cid].samples)
            m_result_s_t[sid].push_back(tid);
    }
    return true;
}
if-1.c | /* { dg-do compile } */
/* Compile-only coverage of the OpenMP `if' clause: for every construct that
   accepts it, exercise both the plain form (`if (a)') and the
   directive-name-modifier form (`if (parallel : a)'), including combined
   constructs carrying several modifier-qualified `if' clauses at once.  */
void
foo (int a, int b, int *p, int *q)
{
  int i;
  #pragma omp parallel if (a)
    ;
  #pragma omp parallel if (parallel:a)
    ;
  #pragma omp parallel for simd if (a)
  for (i = 0; i < 16; i++)
    ;
  #pragma omp parallel for simd if (parallel : a)
  for (i = 0; i < 16; i++)
    ;
  #pragma omp parallel for simd if (simd : a)
  for (i = 0; i < 16; i++)
    ;
  #pragma omp parallel for simd if (simd : a) if (parallel:b)
  for (i = 0; i < 16; i++)
    ;
  #pragma omp task if (a)
    ;
  #pragma omp task if (task: a)
    ;
  #pragma omp taskloop if (a)
  for (i = 0; i < 16; i++)
    ;
  #pragma omp taskloop if (taskloop : a)
  for (i = 0; i < 16; i++)
    ;
  #pragma omp taskloop simd if (a)
  for (i = 0; i < 16; i++)
    ;
  #pragma omp taskloop simd if (taskloop : a)
  for (i = 0; i < 16; i++)
    ;
  #pragma omp taskloop simd if (simd : a)
  for (i = 0; i < 16; i++)
    ;
  #pragma omp taskloop simd if (taskloop:b) if (simd : a)
  for (i = 0; i < 16; i++)
    ;
  #pragma omp target if (a)
    ;
  #pragma omp target if (target: a)
    ;
  #pragma omp target simd if (a)
  for (i = 0; i < 16; i++)
    ;
  #pragma omp target simd if (simd : a) if (target: b)
  for (i = 0; i < 16; i++)
    ;
  #pragma omp target teams distribute parallel for simd if (a)
  for (i = 0; i < 16; i++)
    ;
  #pragma omp target teams distribute parallel for simd if (parallel : a) if (target: b)
  for (i = 0; i < 16; i++)
    ;
  #pragma omp target teams distribute parallel for simd if (simd : a) if (target: b)
  for (i = 0; i < 16; i++)
    ;
  #pragma omp target data if (a) map (p[0:2])
    ;
  #pragma omp target data if (target data: a) map (p[0:2])
    ;
  #pragma omp target enter data if (a) map (to: p[0:2])
  #pragma omp target enter data if (target enter data: a) map (to: p[0:2])
  #pragma omp target exit data if (a) map (from: p[0:2])
  #pragma omp target exit data if (target exit data: a) map (from: p[0:2])
  #pragma omp target update if (a) to (q[0:3])
  #pragma omp target update if (target update:a) to (q[0:3])
  #pragma omp parallel
  {
    #pragma omp cancel parallel if (a)
  }
  #pragma omp parallel
  {
    #pragma omp cancel parallel if (cancel:a)
  }
  #pragma omp for
  for (i = 0; i < 16; i++)
    {
      #pragma omp cancel for if (a)
    }
  #pragma omp for
  for (i = 0; i < 16; i++)
    {
      #pragma omp cancel for if (cancel: a)
    }
  #pragma omp sections
  {
    #pragma omp section
    {
      #pragma omp cancel sections if (a)
    }
  }
  #pragma omp sections
  {
    #pragma omp section
    {
      #pragma omp cancel sections if (cancel: a)
    }
  }
  #pragma omp taskgroup
  {
    #pragma omp task
    {
      #pragma omp cancel taskgroup if (a)
    }
    #pragma omp task
    {
      #pragma omp cancel taskgroup if (cancel: a)
    }
  }
}
|
profile.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP RRRR OOO FFFFF IIIII L EEEEE %
% P P R R O O F I L E %
% PPPP RRRR O O FFF I L EEE %
% P R R O O F I L E %
% P R R OOO F IIIII LLLLL EEEEE %
% %
% %
% MagickCore Image Profile Methods %
% %
% Software Design %
% John Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/cache.h"
#include "magick/color.h"
#include "magick/colorspace-private.h"
#include "magick/configure.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/hashmap.h"
#include "magick/image.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/profile.h"
#include "magick/property.h"
#include "magick/quantum.h"
#include "magick/quantum-private.h"
#include "magick/resource_.h"
#include "magick/splay-tree.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/token.h"
#include "magick/utility.h"
#if defined(MAGICKCORE_LCMS_DELEGATE)
#if defined(MAGICKCORE_HAVE_LCMS_LCMS2_H)
#include <wchar.h>
#include <lcms/lcms2.h>
#elif defined(MAGICKCORE_HAVE_LCMS2_H)
#include <wchar.h>
#include "lcms2.h"
#elif defined(MAGICKCORE_HAVE_LCMS_LCMS_H)
#include <lcms/lcms.h>
#else
#include "lcms.h"
#endif
#endif
/*
Define declarations.
*/
#if !defined(LCMS_VERSION) || (LCMS_VERSION < 2000)
#define cmsSigCmykData icSigCmykData
#define cmsSigGrayData icSigGrayData
#define cmsSigLabData icSigLabData
#define cmsSigLuvData icSigLuvData
#define cmsSigRgbData icSigRgbData
#define cmsSigXYZData icSigXYZData
#define cmsSigYCbCrData icSigYCbCrData
#define cmsSigLinkClass icSigLinkClass
#define cmsColorSpaceSignature icColorSpaceSignature
#define cmsUInt32Number DWORD
#define cmsSetLogErrorHandler(handler) cmsSetErrorHandler(handler)
#define cmsCreateTransformTHR(context,source_profile,source_type, \
target_profile,target_type,intent,flags) cmsCreateTransform(source_profile, \
source_type,target_profile,target_type,intent,flags);
#define cmsOpenProfileFromMemTHR(context,profile,length) \
cmsOpenProfileFromMem(profile,length)
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e P r o f i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImageProfiles() clones one or more image profiles.
%
% The format of the CloneImageProfiles method is:
%
% MagickBooleanType CloneImageProfiles(Image *image,
% const Image *clone_image)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clone_image: the clone image.
%
*/
MagickExport MagickBooleanType CloneImageProfiles(Image *image,
  const Image *clone_image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(clone_image != (const Image *) NULL);
  assert(clone_image->signature == MagickSignature);
  /*
    Keep the deprecated direct profile members in step with the clone.  This
    is a shallow copy: the info pointers alias the clone's buffers.
  */
  image->color_profile.length=clone_image->color_profile.length;
  image->color_profile.info=clone_image->color_profile.info;
  image->iptc_profile.length=clone_image->iptc_profile.length;
  image->iptc_profile.info=clone_image->iptc_profile.info;
  if (clone_image->profiles != (void *) NULL)
    {
      /*
        Replace any existing profile map with a deep copy of the clone's
        splay-tree (keys duplicated, StringInfo payloads cloned).
      */
      if (image->profiles != (void *) NULL)
        DestroyImageProfiles(image);
      image->profiles=CloneSplayTree((SplayTreeInfo *) clone_image->profiles,
        (void *(*)(void *)) ConstantString,(void *(*)(void *)) CloneStringInfo);
    }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e l e t e I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DeleteImageProfile() deletes a profile from the image by its name.
%
% The format of the DeleteImageProfile method is:
%
% MagickBooleanTyupe DeleteImageProfile(Image *image,const char *name)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name.
%
*/
MagickExport MagickBooleanType DeleteImageProfile(Image *image,const char *name)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return(MagickFalse);
  /*
    The deprecated direct profile members shadow the "icc" and "iptc"
    entries; reset whichever one matches before touching the splay-tree.
  */
  if (LocaleCompare(name,"icc") == 0)
    {
      image->color_profile.length=0;
      image->color_profile.info=(unsigned char *) NULL;
    }
  else
    if (LocaleCompare(name,"iptc") == 0)
      {
        image->iptc_profile.length=0;
        image->iptc_profile.info=(unsigned char *) NULL;
      }
  return(DeleteNodeFromSplayTree((SplayTreeInfo *) image->profiles,name));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e P r o f i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImageProfiles() releases memory associated with an image profile map.
%
% The format of the DestroyProfiles method is:
%
% void DestroyImageProfiles(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DestroyImageProfiles(Image *image)
{
  /*
    Release the profile splay-tree, if any, and clear the pointer.
  */
  if (image->profiles == (SplayTreeInfo *) NULL)
    return;
  image->profiles=DestroySplayTree((SplayTreeInfo *) image->profiles);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageProfile() gets a profile associated with an image by name.
%
% The format of the GetImageProfile method is:
%
% const StringInfo *GetImageProfile(const Image *image,const char *name)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name.
%
*/
MagickExport const StringInfo *GetImageProfile(const Image *image,
  const char *name)
{
  char
    key[MaxTextExtent];

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return((StringInfo *) NULL);
  /*
    Look the profile up under a length-bounded copy of the caller's name.
  */
  (void) CopyMagickString(key,name,MaxTextExtent);
  return((const StringInfo *) GetValueFromSplayTree((SplayTreeInfo *)
    image->profiles,key));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t N e x t I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNextImageProfile() gets the next profile name for an image.
%
% The format of the GetNextImageProfile method is:
%
% char *GetNextImageProfile(const Image *image)
%
% A description of each parameter follows:
%
% o hash_info: the hash info.
%
*/
MagickExport char *GetNextImageProfile(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    No profile map means there are no keys to iterate.
  */
  return(image->profiles == (SplayTreeInfo *) NULL ? (char *) NULL :
    (char *) GetNextKeyInSplayTree((SplayTreeInfo *) image->profiles));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P r o f i l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ProfileImage() associates, applies, or removes an ICM, IPTC, or generic
% profile with / to / from an image. If the profile is NULL, it is removed
% from the image otherwise added or applied. Use a name of '*' and a profile
% of NULL to remove all profiles from the image.
%
% ICC and ICM profiles are handled as follows: If the image does not have
% an associated color profile, the one you provide is associated with the
% image and the image pixels are not transformed. Otherwise, the colorspace
% transform defined by the existing and new profile are applied to the image
% pixels and the new profile is associated with the image.
%
% The format of the ProfileImage method is:
%
% MagickBooleanType ProfileImage(Image *image,const char *name,
% const void *datum,const size_t length,const MagickBooleanType clone)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: Name of profile to add or remove: ICC, IPTC, or generic profile.
%
% o datum: the profile data.
%
% o length: the length of the profile.
%
% o clone: should be MagickFalse.
%
*/
#if defined(MAGICKCORE_LCMS_DELEGATE)
static unsigned short **DestroyPixelThreadSet(unsigned short **pixels)
{
  register ssize_t
    i;

  /*
    Free each per-thread pixel row, then the row table itself; returns NULL.
  */
  assert(pixels != (unsigned short **) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    {
      if (pixels[i] == (unsigned short *) NULL)
        continue;
      pixels[i]=(unsigned short *) RelinquishMagickMemory(pixels[i]);
    }
  pixels=(unsigned short **) RelinquishMagickMemory(pixels);
  return(pixels);
}
static unsigned short **AcquirePixelThreadSet(const size_t columns,
  const size_t channels)
{
  register ssize_t
    i;

  size_t
    number_threads;

  unsigned short
    **pixels;

  /*
    Allocate one scanline buffer (columns*channels samples) per worker
    thread; NULL on failure.
  */
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  pixels=(unsigned short **) AcquireQuantumMemory(number_threads,
    sizeof(*pixels));
  if (pixels == (unsigned short **) NULL)
    return((unsigned short **) NULL);
  /*
    Zero the table so a partial failure below can be unwound safely.
  */
  (void) ResetMagickMemory(pixels,0,number_threads*sizeof(*pixels));
  for (i=0; i < (ssize_t) number_threads; i++)
    {
      pixels[i]=(unsigned short *) AcquireQuantumMemory(columns,channels*
        sizeof(**pixels));
      if (pixels[i] == (unsigned short *) NULL)
        return(DestroyPixelThreadSet(pixels));
    }
  return(pixels);
}
static cmsHTRANSFORM *DestroyTransformThreadSet(cmsHTRANSFORM *transform)
{
  register ssize_t
    i;

  /*
    Tear down every per-thread LCMS transform, then the table; returns NULL.
  */
  assert(transform != (cmsHTRANSFORM *) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    {
      if (transform[i] == (cmsHTRANSFORM) NULL)
        continue;
      cmsDeleteTransform(transform[i]);
    }
  transform=(cmsHTRANSFORM *) RelinquishMagickMemory(transform);
  return(transform);
}
/*
  Build one identically-configured LCMS color transform per worker thread.
  Returns NULL on failure (any partial set is released first).  Note:
  cmsCreateTransformTHR is a compatibility macro under lcms1 (see the
  defines above) that expands with a trailing semicolon, so the statement
  form here must stay as-is.
*/
static cmsHTRANSFORM *AcquireTransformThreadSet(Image *image,
  const cmsHPROFILE source_profile,const cmsUInt32Number source_type,
  const cmsHPROFILE target_profile,const cmsUInt32Number target_type,
  const int intent,const cmsUInt32Number flags)
{
  cmsHTRANSFORM
    *transform;

  register ssize_t
    i;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  transform=(cmsHTRANSFORM *) AcquireQuantumMemory(number_threads,
    sizeof(*transform));
  if (transform == (cmsHTRANSFORM *) NULL)
    return((cmsHTRANSFORM *) NULL);
  /* zero-fill so DestroyTransformThreadSet() can unwind a partial set */
  (void) ResetMagickMemory(transform,0,number_threads*sizeof(*transform));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    transform[i]=cmsCreateTransformTHR(image,source_profile,source_type,
      target_profile,target_type,intent,flags);
    if (transform[i] == (cmsHTRANSFORM) NULL)
      return(DestroyTransformThreadSet(transform));
  }
  return(transform);
}
#endif
#if defined(MAGICKCORE_LCMS_DELEGATE)
#if defined(LCMS_VERSION) && (LCMS_VERSION >= 2000)
/*
  LCMS2 error callback: log the failure and, when the cmsContext carries
  the Image being transformed, record a warning on that image's exception.
*/
static void LCMSExceptionHandler(cmsContext context,cmsUInt32Number severity,
  const char *message)
{
  Image
    *image;

  (void) LogMagickEvent(TransformEvent,GetMagickModule(),"lcms: #%u, %s",
    severity,message != (char *) NULL ? message : "no message");
  /* the context registered with cmsCreateTransformTHR is the Image */
  image=(Image *) context;
  if (image != (Image *) NULL)
    (void) ThrowMagickException(&image->exception,GetMagickModule(),
      ImageWarning,"UnableToTransformColorspace","`%s'",image->filename);
}
#else
/*
  LCMS1 error callback: log the failure; returning 1 tells LCMS the
  error was handled.
*/
static int LCMSExceptionHandler(int severity,const char *message)
{
  (void) LogMagickEvent(TransformEvent,GetMagickModule(),"lcms: #%d, %s",
    severity,message != (char *) NULL ? message : "no message");
  return(1);
}
#endif
#endif
/*
  ProfileImage() adds, applies, or deletes image profiles.  With a NULL or
  empty datum, `name' is a comma-separated list of glob patterns naming
  profiles to delete (a leading '!' protects a profile).  Otherwise the
  profile is attached; for "icc"/"icm" profiles the pixels are color
  transformed with LCMS from the currently attached ICC profile (or the
  image's implied colorspace) to the new one.

  Fix: the transform scope previously re-declared `status', shadowing the
  function-scope result so pixel-transform failures and the final
  SetImageProfile() result were silently discarded; the shadowing
  declaration is removed so failures propagate to the return value.
*/
MagickExport MagickBooleanType ProfileImage(Image *image,const char *name,
  const void *datum,const size_t length,
  const MagickBooleanType magick_unused(clone))
{
#define ProfileImageTag "Profile/Image"
#define ThrowProfileException(severity,tag,context) \
{ \
  if (source_profile != (cmsHPROFILE) NULL) \
    (void) cmsCloseProfile(source_profile); \
  if (target_profile != (cmsHPROFILE) NULL) \
    (void) cmsCloseProfile(target_profile); \
  ThrowBinaryException(severity,tag,context); \
}

  MagickBooleanType
    status;

  StringInfo
    *profile;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(name != (const char *) NULL);
  if ((datum == (const void *) NULL) || (length == 0))
    {
      char
        **arguments,
        *names;

      int
        number_arguments;

      register ssize_t
        i;

      /*
        Delete image profile(s) matching any glob in the comma-separated
        list; '!'-prefixed patterns exempt a profile from deletion.
      */
      names=ConstantString(name);
      (void) SubstituteString(&names,","," ");
      arguments=StringToArgv(names,&number_arguments);
      names=DestroyString(names);
      if (arguments == (char **) NULL)
        return(MagickTrue);
      ResetImageProfileIterator(image);
      for (name=GetNextImageProfile(image); name != (const char *) NULL; )
      {
        for (i=1; i < (ssize_t) number_arguments; i++)
        {
          if ((*arguments[i] == '!') &&
              (LocaleCompare(name,arguments[i]+1) == 0))
            break;
          if (GlobExpression(name,arguments[i],MagickTrue) != MagickFalse)
            {
              /* deletion invalidates the iterator; restart it */
              (void) DeleteImageProfile(image,name);
              ResetImageProfileIterator(image);
              break;
            }
        }
        name=GetNextImageProfile(image);
      }
      for (i=0; i < (ssize_t) number_arguments; i++)
        arguments[i]=DestroyString(arguments[i]);
      arguments=(char **) RelinquishMagickMemory(arguments);
      return(MagickTrue);
    }
  /*
    Add a ICC, IPTC, or generic profile to the image.
  */
  status=MagickTrue;
  profile=AcquireStringInfo((size_t) length);
  SetStringInfoDatum(profile,(unsigned char *) datum);
  if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0))
    status=SetImageProfile(image,name,profile);
  else
    {
      const StringInfo
        *icc_profile;

      icc_profile=GetImageProfile(image,"icc");
      if ((icc_profile != (const StringInfo *) NULL) &&
          (CompareStringInfo(icc_profile,profile) == 0))
        {
          const char
            *value;

          value=GetImageProperty(image,"exif:ColorSpace");
          (void) value;
          /* Future.
          if (LocaleCompare(value,"1") != 0)
            (void) SetsRGBImageProfile(image);
          value=GetImageProperty(image,"exif:InteroperabilityIndex");
          if (LocaleCompare(value,"R98.") != 0)
            (void) SetsRGBImageProfile(image);
          value=GetImageProperty(image,"exif:InteroperabilityIndex");
          if (LocaleCompare(value,"R03.") != 0)
            (void) SetAdobeRGB1998ImageProfile(image);
          */
          icc_profile=GetImageProfile(image,"icc");
        }
      if ((icc_profile != (const StringInfo *) NULL) &&
          (CompareStringInfo(icc_profile,profile) == 0))
        {
          /*
            New ICC profile is identical to the attached one: no
            transform needed.
          */
          profile=DestroyStringInfo(profile);
          return(MagickTrue);
        }
#if !defined(MAGICKCORE_LCMS_DELEGATE)
      (void) ThrowMagickException(&image->exception,GetMagickModule(),
        MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (LCMS)",
        image->filename);
#else
      {
        cmsHPROFILE
          source_profile;

        /*
          Transform pixel colors as defined by the color profiles.
        */
        cmsSetLogErrorHandler(LCMSExceptionHandler);
        source_profile=cmsOpenProfileFromMemTHR(image,
          GetStringInfoDatum(profile),(cmsUInt32Number)
          GetStringInfoLength(profile));
        if (source_profile == (cmsHPROFILE) NULL)
          ThrowBinaryException(ResourceLimitError,
            "ColorspaceColorProfileMismatch",name);
        if ((cmsGetDeviceClass(source_profile) != cmsSigLinkClass) &&
            (icc_profile == (StringInfo *) NULL))
          status=SetImageProfile(image,name,profile);
        else
          {
            CacheView
              *image_view;

            ColorspaceType
              source_colorspace,
              target_colorspace;

            cmsColorSpaceSignature
              signature;

            cmsHPROFILE
              target_profile;

            cmsHTRANSFORM
              *restrict transform;

            cmsUInt32Number
              flags,
              source_type,
              target_type;

            ExceptionInfo
              *exception;

            int
              intent;

            MagickOffsetType
              progress;

            size_t
              source_channels,
              target_channels;

            ssize_t
              y;

            unsigned short
              **restrict source_pixels,
              **restrict target_pixels;

            /*
              Note: `status' is deliberately NOT re-declared here; a local
              declaration would shadow the function result and discard
              transform failures.
            */
            exception=(&image->exception);
            target_profile=(cmsHPROFILE) NULL;
            if (icc_profile != (StringInfo *) NULL)
              {
                /*
                  An ICC profile is already attached: transform from it
                  (source) to the newly supplied profile (target).
                */
                target_profile=source_profile;
                source_profile=cmsOpenProfileFromMemTHR(image,
                  GetStringInfoDatum(icc_profile),(cmsUInt32Number)
                  GetStringInfoLength(icc_profile));
                if (source_profile == (cmsHPROFILE) NULL)
                  ThrowProfileException(ResourceLimitError,
                    "ColorspaceColorProfileMismatch",name);
              }
            /*
              Map the source profile's colorspace to a pixel layout.
            */
            switch (cmsGetColorSpace(source_profile))
            {
              case cmsSigCmykData:
              {
                source_colorspace=CMYKColorspace;
                source_type=(cmsUInt32Number) TYPE_CMYK_16;
                source_channels=4;
                break;
              }
              case cmsSigGrayData:
              {
                source_colorspace=GRAYColorspace;
                source_type=(cmsUInt32Number) TYPE_GRAY_16;
                source_channels=1;
                break;
              }
              case cmsSigLabData:
              {
                source_colorspace=LabColorspace;
                source_type=(cmsUInt32Number) TYPE_Lab_16;
                source_channels=3;
                break;
              }
              case cmsSigLuvData:
              {
                source_colorspace=YUVColorspace;
                source_type=(cmsUInt32Number) TYPE_YUV_16;
                source_channels=3;
                break;
              }
              case cmsSigRgbData:
              {
                source_colorspace=sRGBColorspace;
                source_type=(cmsUInt32Number) TYPE_RGB_16;
                source_channels=3;
                break;
              }
              case cmsSigXYZData:
              {
                source_colorspace=XYZColorspace;
                source_type=(cmsUInt32Number) TYPE_XYZ_16;
                source_channels=3;
                break;
              }
              case cmsSigYCbCrData:
              {
                source_colorspace=YCbCrColorspace;
                source_type=(cmsUInt32Number) TYPE_YCbCr_16;
                source_channels=3;
                break;
              }
              default:
              {
                source_colorspace=UndefinedColorspace;
                source_type=(cmsUInt32Number) TYPE_RGB_16;
                source_channels=3;
                break;
              }
            }
            /*
              The target layout comes from the target profile's colorspace
              (or the source profile's PCS when there is no target).
            */
            signature=cmsGetPCS(source_profile);
            if (target_profile != (cmsHPROFILE) NULL)
              signature=cmsGetColorSpace(target_profile);
            switch (signature)
            {
              case cmsSigCmykData:
              {
                target_colorspace=CMYKColorspace;
                target_type=(cmsUInt32Number) TYPE_CMYK_16;
                target_channels=4;
                break;
              }
              case cmsSigLabData:
              {
                target_colorspace=LabColorspace;
                target_type=(cmsUInt32Number) TYPE_Lab_16;
                target_channels=3;
                break;
              }
              case cmsSigGrayData:
              {
                target_colorspace=GRAYColorspace;
                target_type=(cmsUInt32Number) TYPE_GRAY_16;
                target_channels=1;
                break;
              }
              case cmsSigLuvData:
              {
                target_colorspace=YUVColorspace;
                target_type=(cmsUInt32Number) TYPE_YUV_16;
                target_channels=3;
                break;
              }
              case cmsSigRgbData:
              {
                target_colorspace=sRGBColorspace;
                target_type=(cmsUInt32Number) TYPE_RGB_16;
                target_channels=3;
                break;
              }
              case cmsSigXYZData:
              {
                target_colorspace=XYZColorspace;
                target_type=(cmsUInt32Number) TYPE_XYZ_16;
                target_channels=3;
                break;
              }
              case cmsSigYCbCrData:
              {
                target_colorspace=YCbCrColorspace;
                target_type=(cmsUInt32Number) TYPE_YCbCr_16;
                target_channels=3;
                break;
              }
              default:
              {
                target_colorspace=UndefinedColorspace;
                target_type=(cmsUInt32Number) TYPE_RGB_16;
                target_channels=3;
                break;
              }
            }
            /*
              Reject profile/image colorspace combinations we cannot
              service.
            */
            if ((source_colorspace == UndefinedColorspace) ||
                (target_colorspace == UndefinedColorspace))
              ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch",
                name);
            if ((source_colorspace == GRAYColorspace) &&
                (IsGrayImage(image,exception) == MagickFalse))
              ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch",
                name);
            if ((source_colorspace == CMYKColorspace) &&
                (image->colorspace != CMYKColorspace))
              ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch",
                name);
            if ((source_colorspace == XYZColorspace) &&
                (image->colorspace != XYZColorspace))
              ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch",
                name);
            if ((source_colorspace == YCbCrColorspace) &&
                (image->colorspace != YCbCrColorspace))
              ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch",
                name);
            if ((source_colorspace != CMYKColorspace) &&
                (source_colorspace != GRAYColorspace) &&
                (source_colorspace != LabColorspace) &&
                (source_colorspace != XYZColorspace) &&
                (source_colorspace != YCbCrColorspace) &&
                (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse))
              ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch",
                name);
            switch (image->rendering_intent)
            {
              case AbsoluteIntent: intent=INTENT_ABSOLUTE_COLORIMETRIC; break;
              case PerceptualIntent: intent=INTENT_PERCEPTUAL; break;
              case RelativeIntent: intent=INTENT_RELATIVE_COLORIMETRIC; break;
              case SaturationIntent: intent=INTENT_SATURATION; break;
              default: intent=INTENT_PERCEPTUAL; break;
            }
            flags=cmsFLAGS_HIGHRESPRECALC;
#if defined(cmsFLAGS_BLACKPOINTCOMPENSATION)
            if (image->black_point_compensation != MagickFalse)
              flags|=cmsFLAGS_BLACKPOINTCOMPENSATION;
#endif
            transform=AcquireTransformThreadSet(image,source_profile,
              source_type,target_profile,target_type,intent,flags);
            if (transform == (cmsHTRANSFORM *) NULL)
              ThrowProfileException(ImageError,"UnableToCreateColorTransform",
                name);
            /*
              Transform image as dictated by the source & target image profiles.
            */
            source_pixels=AcquirePixelThreadSet(image->columns,source_channels);
            target_pixels=AcquirePixelThreadSet(image->columns,target_channels);
            if ((source_pixels == (unsigned short **) NULL) ||
                (target_pixels == (unsigned short **) NULL))
              {
                transform=DestroyTransformThreadSet(transform);
                ThrowProfileException(ResourceLimitError,
                  "MemoryAllocationFailed",image->filename);
              }
            if (SetImageStorageClass(image,DirectClass) == MagickFalse)
              {
                target_pixels=DestroyPixelThreadSet(target_pixels);
                source_pixels=DestroyPixelThreadSet(source_pixels);
                transform=DestroyTransformThreadSet(transform);
                if (source_profile != (cmsHPROFILE) NULL)
                  (void) cmsCloseProfile(source_profile);
                if (target_profile != (cmsHPROFILE) NULL)
                  (void) cmsCloseProfile(target_profile);
                return(MagickFalse);
              }
            if (target_colorspace == CMYKColorspace)
              (void) SetImageColorspace(image,target_colorspace);
            status=MagickTrue;
            progress=0;
            image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp parallel for schedule(static,4) shared(status) \
              magick_threads(image,image,image->rows,1)
#endif
            for (y=0; y < (ssize_t) image->rows; y++)
            {
              const int
                id = GetOpenMPThreadId();

              MagickBooleanType
                sync;

              register IndexPacket
                *restrict indexes;

              register ssize_t
                x;

              register PixelPacket
                *restrict q;

              register unsigned short
                *p;

              if (status == MagickFalse)
                continue;
              q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
                exception);
              if (q == (PixelPacket *) NULL)
                {
                  status=MagickFalse;
                  continue;
                }
              indexes=GetCacheViewAuthenticIndexQueue(image_view);
              /*
                Pack this row into the thread's 16-bit source buffer.
              */
              p=source_pixels[id];
              for (x=0; x < (ssize_t) image->columns; x++)
              {
                *p++=ScaleQuantumToShort(GetPixelRed(q));
                if (source_channels > 1)
                  {
                    *p++=ScaleQuantumToShort(GetPixelGreen(q));
                    *p++=ScaleQuantumToShort(GetPixelBlue(q));
                  }
                if (source_channels > 3)
                  *p++=ScaleQuantumToShort(GetPixelIndex(indexes+x));
                q++;
              }
              cmsDoTransform(transform[id],source_pixels[id],target_pixels[id],
                (unsigned int) image->columns);
              /*
                Unpack the transformed buffer back into the pixel cache;
                gray output replicates the single channel into RGB.
              */
              p=target_pixels[id];
              q-=image->columns;
              for (x=0; x < (ssize_t) image->columns; x++)
              {
                SetPixelRed(q,ScaleShortToQuantum(*p));
                SetPixelGreen(q,GetPixelRed(q));
                SetPixelBlue(q,GetPixelRed(q));
                p++;
                if (target_channels > 1)
                  {
                    SetPixelGreen(q,ScaleShortToQuantum(*p));
                    p++;
                    SetPixelBlue(q,ScaleShortToQuantum(*p));
                    p++;
                  }
                if (target_channels > 3)
                  {
                    SetPixelIndex(indexes+x,ScaleShortToQuantum(*p));
                    p++;
                  }
                q++;
              }
              sync=SyncCacheViewAuthenticPixels(image_view,exception);
              if (sync == MagickFalse)
                status=MagickFalse;
              if (image->progress_monitor != (MagickProgressMonitor) NULL)
                {
                  MagickBooleanType
                    proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
                  #pragma omp critical (MagickCore_ProfileImage)
#endif
                  proceed=SetImageProgress(image,ProfileImageTag,progress++,
                    image->rows);
                  if (proceed == MagickFalse)
                    status=MagickFalse;
                }
            }
            image_view=DestroyCacheView(image_view);
            (void) SetImageColorspace(image,target_colorspace);
            switch (signature)
            {
              case cmsSigRgbData:
              {
                image->type=image->matte == MagickFalse ? TrueColorType :
                  TrueColorMatteType;
                break;
              }
              case cmsSigCmykData:
              {
                image->type=image->matte == MagickFalse ? ColorSeparationType :
                  ColorSeparationMatteType;
                break;
              }
              case cmsSigGrayData:
              {
                image->type=image->matte == MagickFalse ? GrayscaleType :
                  GrayscaleMatteType;
                break;
              }
              default:
                break;
            }
            target_pixels=DestroyPixelThreadSet(target_pixels);
            source_pixels=DestroyPixelThreadSet(source_pixels);
            transform=DestroyTransformThreadSet(transform);
            if (cmsGetDeviceClass(source_profile) != cmsSigLinkClass)
              status=SetImageProfile(image,name,profile);
            if (target_profile != (cmsHPROFILE) NULL)
              (void) cmsCloseProfile(target_profile);
          }
        (void) cmsCloseProfile(source_profile);
      }
#endif
    }
  profile=DestroyStringInfo(profile);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m o v e I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemoveImageProfile() removes a named profile from the image and returns its
% value.
%
% The format of the RemoveImageProfile method is:
%
% void *RemoveImageProfile(Image *image,const char *name)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name.
%
*/
MagickExport StringInfo *RemoveImageProfile(Image *image,const char *name)
{
  /*
    Detach the named profile from the image and return it to the caller
    (NULL when the image has no profiles).  The deprecated color/IPTC
    profile members are cleared so they do not dangle.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return((StringInfo *) NULL);
  if (LocaleCompare(name,"icc") == 0)
    {
      /*
        Continue to support deprecated color profile for now.
      */
      image->color_profile.length=0;
      image->color_profile.info=(unsigned char *) NULL;
    }
  else
    if (LocaleCompare(name,"iptc") == 0)
      {
        /*
          Continue to support deprecated IPTC profile for now.
        */
        image->iptc_profile.length=0;
        image->iptc_profile.info=(unsigned char *) NULL;
      }
  return((StringInfo *) RemoveNodeFromSplayTree((SplayTreeInfo *)
    image->profiles,name));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s e t P r o f i l e I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetImageProfileIterator() resets the image profile iterator. Use it in
% conjunction with GetNextImageProfile() to iterate over all the profiles
% associated with an image.
%
% The format of the ResetImageProfileIterator method is:
%
% ResetImageProfileIterator(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void ResetImageProfileIterator(const Image *image)
{
  /*
    Rewind the profile splay-tree iterator so GetNextImageProfile()
    starts from the first profile; a no-op when no profiles exist.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles != (SplayTreeInfo *) NULL)
    ResetSplayTreeIterator((SplayTreeInfo *) image->profiles);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageProfile() adds a named profile to the image. If a profile with the
% same name already exists, it is replaced. This method differs from the
% ProfileImage() method in that it does not apply CMS color profiles.
%
% The format of the SetImageProfile method is:
%
% MagickBooleanType SetImageProfile(Image *image,const char *name,
% const StringInfo *profile)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name, for example icc, exif, and 8bim (8bim is the
% Photoshop wrapper for iptc profiles).
%
% o profile: A StringInfo structure that contains the named profile.
%
*/
static void *DestroyProfile(void *profile)
{
return((void *) DestroyStringInfo((StringInfo *) profile));
}
static inline const unsigned char *ReadResourceByte(const unsigned char *p,
  unsigned char *quantum)
{
  /*
    Copy one byte out of the resource block; returns the advanced source
    pointer.
  */
  *quantum=(*p);
  return(p+1);
}
static inline const unsigned char *ReadResourceBytes(const unsigned char *p,
  const ssize_t count,unsigned char *quantum)
{
  /*
    Copy count bytes out of the resource block; returns the advanced
    source pointer (unchanged when count <= 0).
  */
  const unsigned char
    *q;

  ssize_t
    n;

  q=p;
  for (n=0; n < count; n++)
    *quantum++=(*q++);
  return(q);
}
static inline const unsigned char *ReadResourceLong(const unsigned char *p,
  size_t *quantum)
{
  /*
    Decode a big-endian 32-bit value from the resource block.  Each byte
    is widened to size_t *before* shifting: the previous `*p++ << 24'
    shifted in (signed) int after integer promotion, which is undefined
    behavior for bytes >= 0x80.
  */
  *quantum=(size_t) (*p++) << 24;
  *quantum|=(size_t) (*p++) << 16;
  *quantum|=(size_t) (*p++) << 8;
  *quantum|=(size_t) (*p++);
  return(p);
}
static inline const unsigned char *ReadResourceShort(const unsigned char *p,
  unsigned short *quantum)
{
  /*
    Decode a big-endian 16-bit value from the resource block; returns
    the advanced source pointer.
  */
  *quantum=(unsigned short) ((p[0] << 8) | p[1]);
  return(p+2);
}
/*
  Walk a Photoshop 8BIM resource block and register any embedded profiles
  (IPTC 0x0404, ICC 0x040f, EXIF 0x0422, XMP 0x0424) on the image;
  resolution resources (0x03ed) update image->x_resolution/y_resolution
  directly.  Malformed or truncated blocks terminate the walk early and
  still return MagickTrue.
*/
static MagickBooleanType GetProfilesFromResourceBlock(Image *image,
  const StringInfo *resource_block)
{
  const unsigned char
    *datum;

  register const unsigned char
    *p;

  size_t
    length;

  StringInfo
    *profile;

  unsigned char
    length_byte;

  size_t
    count;

  unsigned short
    id;

  datum=GetStringInfoDatum(resource_block);
  length=GetStringInfoLength(resource_block);
  for (p=datum; p < (datum+length-16); )
  {
    /*
      Each resource: "8BIM" signature, 2-byte id, Pascal-style name
      (padded to an even byte count), 4-byte payload length, payload
      (also even-padded).
    */
    if (LocaleNCompare((char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=ReadResourceShort(p,&id);
    p=ReadResourceByte(p,&length_byte);
    p+=length_byte;
    if (((length_byte+1) & 0x01) != 0)
      p++;  /* skip the name's pad byte */
    if (p > (datum+length-4))
      break;
    p=ReadResourceLong(p,&count);
    if ((p > (datum+length-count)) || (count > length))
      break;  /* truncated or bogus payload length */
    switch (id)
    {
      case 0x03ed:
      {
        unsigned short
          resolution;

        /*
          Resolution.
        */
        p=ReadResourceShort(p,&resolution)+6;
        image->x_resolution=(double) resolution;
        p=ReadResourceShort(p,&resolution)+6;
        image->y_resolution=(double) resolution;
        break;
      }
      case 0x0404:
      {
        /*
          IPTC Profile
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfile(image,"iptc",profile);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x040c:
      {
        /*
          Thumbnail.
        */
        p+=count;
        break;
      }
      case 0x040f:
      {
        /*
          ICC Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfile(image,"icc",profile);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x0422:
      {
        /*
          EXIF Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfile(image,"exif",profile);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x0424:
      {
        /*
          XMP Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfile(image,"xmp",profile);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      default:
      {
        p+=count;
        break;
      }
    }
    if ((count & 0x01) != 0)
      p++;  /* payloads are padded to an even byte count */
  }
  return(MagickTrue);
}
MagickExport MagickBooleanType SetImageProfile(Image *image,const char *name,
  const StringInfo *profile)
{
  char
    key[MaxTextExtent],
    property[MaxTextExtent];

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Lazily create the profile splay-tree; it owns both the key string
    and the cloned StringInfo value.
  */
  if (image->profiles == (SplayTreeInfo *) NULL)
    image->profiles=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
      DestroyProfile);
  (void) CopyMagickString(key,name,MaxTextExtent);
  status=AddValueToSplayTree((SplayTreeInfo *) image->profiles,
    ConstantString(key),CloneStringInfo(profile));
  if ((status != MagickFalse) &&
      ((LocaleCompare(name,"icc") == 0) || (LocaleCompare(name,"icm") == 0)))
    {
      const StringInfo
        *icc_profile;

      /*
        Continue to support deprecated color profile member.
      */
      icc_profile=GetImageProfile(image,name);
      if (icc_profile != (const StringInfo *) NULL)
        {
          image->color_profile.length=GetStringInfoLength(icc_profile);
          image->color_profile.info=GetStringInfoDatum(icc_profile);
        }
    }
  if ((status != MagickFalse) &&
      ((LocaleCompare(name,"iptc") == 0) || (LocaleCompare(name,"8bim") == 0)))
    {
      const StringInfo
        *iptc_profile;

      /*
        Continue to support deprecated IPTC profile member.
      */
      iptc_profile=GetImageProfile(image,name);
      if (iptc_profile != (const StringInfo *) NULL)
        {
          image->iptc_profile.length=GetStringInfoLength(iptc_profile);
          image->iptc_profile.info=GetStringInfoDatum(iptc_profile);
        }
      /* mine embedded ICC/EXIF/XMP/IPTC payloads out of the 8BIM block */
      (void) GetProfilesFromResourceBlock(image,profile);
    }
  /*
    Inject profile into image properties.
  */
  (void) FormatLocaleString(property,MaxTextExtent,"%s:sans",name);
  (void) GetImageProperty(image,property);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c I m a g e P r o f i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImageProfiles() synchronizes image properties with the image profiles.
% Currently we only support updating the EXIF resolution and orientation.
%
% The format of the SyncImageProfiles method is:
%
% MagickBooleanType SyncImageProfiles(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static inline int ReadProfileByte(unsigned char **p,size_t *length)
{
  /*
    Consume one byte from the profile buffer, advancing the cursor and
    shrinking the remaining length; EOF once the buffer is exhausted.
  */
  if (*length == 0)
    return(EOF);
  (*length)--;
  return((int) *(*p)++);
}
static inline unsigned short ReadProfileShort(const EndianType endian,
  unsigned char *buffer)
{
  /*
    Decode a 16-bit value with the requested byte order.
  */
  if (endian == LSBEndian)
    return((unsigned short) ((buffer[1] << 8) | buffer[0]));
  return((unsigned short) ((buffer[0] << 8) | buffer[1]));
}
static inline size_t ReadProfileLong(const EndianType endian,
  unsigned char *buffer)
{
  size_t
    value;

  /*
    Decode a 32-bit value with the requested byte order.  Each byte is
    widened to size_t *before* shifting: the previous `buffer[3] << 24'
    shifted in (signed) int after integer promotion, which is undefined
    behavior for bytes >= 0x80.
  */
  if (endian == LSBEndian)
    value=((size_t) buffer[3] << 24) | ((size_t) buffer[2] << 16) |
      ((size_t) buffer[1] << 8) | (size_t) buffer[0];
  else
    value=((size_t) buffer[0] << 24) | ((size_t) buffer[1] << 16) |
      ((size_t) buffer[2] << 8) | (size_t) buffer[3];
  return(value & 0xffffffff);
}
static inline void WriteProfileLong(const EndianType endian,
  const size_t value,unsigned char *p)
{
  /*
    Serialize the low 32 bits of value at p with the requested byte
    order.
  */
  unsigned char
    buffer[4];

  if (endian == LSBEndian)
    {
      buffer[0]=(unsigned char) value;
      buffer[1]=(unsigned char) (value >> 8);
      buffer[2]=(unsigned char) (value >> 16);
      buffer[3]=(unsigned char) (value >> 24);
    }
  else
    {
      buffer[0]=(unsigned char) (value >> 24);
      buffer[1]=(unsigned char) (value >> 16);
      buffer[2]=(unsigned char) (value >> 8);
      buffer[3]=(unsigned char) value;
    }
  (void) CopyMagickMemory(p,buffer,4);
}
static void WriteProfileShort(const EndianType endian,
  const unsigned short value,unsigned char *p)
{
  /*
    Serialize a 16-bit value at p with the requested byte order.
  */
  unsigned char
    buffer[2];

  if (endian == LSBEndian)
    {
      buffer[0]=(unsigned char) value;
      buffer[1]=(unsigned char) (value >> 8);
    }
  else
    {
      buffer[0]=(unsigned char) (value >> 8);
      buffer[1]=(unsigned char) value;
    }
  (void) CopyMagickMemory(p,buffer,2);
}
MagickExport MagickBooleanType SyncImageProfiles(Image *image)
{
#define MaxDirectoryStack 16
#define EXIF_DELIMITER "\n"
#define EXIF_NUM_FORMATS 12
#define TAG_EXIF_OFFSET 0x8769
#define TAG_INTEROP_OFFSET 0xa005

  /* saved position for resuming a parent IFD after a sub-IFD detour */
  typedef struct _DirectoryInfo
  {
    unsigned char
      *directory;

    size_t
      entry;
  } DirectoryInfo;

  DirectoryInfo
    directory_stack[MaxDirectoryStack];

  EndianType
    endian;

  size_t
    entry,
    length,
    number_entries;

  SplayTreeInfo
    *exif_resources;

  ssize_t
    id,
    level,
    offset;

  /* byte width of each EXIF value format (index 0 unused) */
  static int
    format_bytes[] = {0, 1, 1, 2, 4, 8, 1, 1, 2, 4, 8, 4, 8};

  StringInfo
    *profile;

  unsigned char
    *directory,
    *exif;

  /*
    Set EXIF resolution tag.  Rewrites XResolution (0x011a), YResolution
    (0x011b), Orientation (0x0112) and ResolutionUnit (0x0128) entries of
    the EXIF profile in place so they agree with the current Image fields.
  */
  profile=(StringInfo *) GetImageProfile(image,"EXIF");
  if (profile == (StringInfo *) NULL)
    return(MagickTrue);
  length=GetStringInfoLength(profile);
  exif=GetStringInfoDatum(profile);
  /*
    Skip forward to the "Exif\0\0" signature preceding the TIFF header.
  */
  while (length != 0)
  {
    if (ReadProfileByte(&exif,&length) != 0x45)
      continue;
    if (ReadProfileByte(&exif,&length) != 0x78)
      continue;
    if (ReadProfileByte(&exif,&length) != 0x69)
      continue;
    if (ReadProfileByte(&exif,&length) != 0x66)
      continue;
    if (ReadProfileByte(&exif,&length) != 0x00)
      continue;
    if (ReadProfileByte(&exif,&length) != 0x00)
      continue;
    break;
  }
  if (length < 16)
    return(MagickFalse);
  /*
    TIFF header: "II" = little-endian, "MM" = big-endian, then 0x002a.
  */
  id=(ssize_t) ReadProfileShort(LSBEndian,exif);
  endian=LSBEndian;
  if (id == 0x4949)
    endian=LSBEndian;
  else
    if (id == 0x4D4D)
      endian=MSBEndian;
    else
      return(MagickFalse);
  if (ReadProfileShort(endian,exif+2) != 0x002a)
    return(MagickFalse);
  /*
    This the offset to the first IFD.
  */
  offset=(ssize_t) ((int) ReadProfileLong(endian,exif+4));
  if ((offset < 0) || ((size_t) offset >= length))
    return(MagickFalse);
  directory=exif+offset;
  level=0;
  entry=0;
  /*
    The splay tree records entries already visited so a profile with
    cyclic IFD offsets cannot loop the walk forever.
  */
  exif_resources=NewSplayTree((int (*)(const void *,const void *)) NULL,
    (void *(*)(void *)) NULL,(void *(*)(void *)) NULL);
  do
  {
    /* pop the next pending directory off the stack */
    if (level > 0)
      {
        level--;
        directory=directory_stack[level].directory;
        entry=directory_stack[level].entry;
      }
    /*
      Determine how many entries there are in the current IFD.
    */
    number_entries=ReadProfileShort(endian,directory);
    for ( ; entry < number_entries; entry++)
    {
      register unsigned char
        *p,
        *q;

      size_t
        number_bytes;

      ssize_t
        components,
        format,
        tag_value;

      /* each IFD entry is 12 bytes: tag, format, count, value/offset */
      q=(unsigned char *) (directory+2+(12*entry));
      if (GetValueFromSplayTree(exif_resources,q) == q)
        break;  /* already visited: cycle */
      (void) AddValueToSplayTree(exif_resources,q,q);
      tag_value=(ssize_t) ReadProfileShort(endian,q);
      format=(ssize_t) ReadProfileShort(endian,q+2);
      if ((format-1) >= EXIF_NUM_FORMATS)
        break;
      components=(ssize_t) ((int) ReadProfileLong(endian,q+4));
      number_bytes=(size_t) components*format_bytes[format];
      if ((ssize_t) number_bytes < components)
        break;  /* prevent overflow */
      if (number_bytes <= 4)
        p=q+8;  /* value stored inline in the entry */
      else
        {
          ssize_t
            offset;

          /*
            The directory entry contains an offset.
          */
          offset=(ssize_t) ((int) ReadProfileLong(endian,q+8));
          if ((ssize_t) (offset+number_bytes) < offset)
            continue;  /* prevent overflow */
          if ((size_t) (offset+number_bytes) > length)
            continue;
          p=(unsigned char *) (exif+offset);
        }
      switch (tag_value)
      {
        case 0x011a:
        {
          /* XResolution: rational; numerator <- rounded x_resolution */
          (void) WriteProfileLong(endian,(size_t) (image->x_resolution+0.5),p);
          (void) WriteProfileLong(endian,1UL,p+4);
          break;
        }
        case 0x011b:
        {
          /* YResolution */
          (void) WriteProfileLong(endian,(size_t) (image->y_resolution+0.5),p);
          (void) WriteProfileLong(endian,1UL,p+4);
          break;
        }
        case 0x0112:
        {
          /* Orientation: writers store it as short or long */
          if (number_bytes == 4)
            {
              (void) WriteProfileLong(endian,(size_t) image->orientation,p);
              break;
            }
          (void) WriteProfileShort(endian,(unsigned short) image->orientation,
            p);
          break;
        }
        case 0x0128:
        {
          /* ResolutionUnit: EXIF's enumeration is ours offset by one */
          if (number_bytes == 4)
            {
              (void) WriteProfileLong(endian,(size_t) (image->units+1),p);
              break;
            }
          (void) WriteProfileShort(endian,(unsigned short) (image->units+1),p);
          break;
        }
        default:
          break;
      }
      if ((tag_value == TAG_EXIF_OFFSET) || (tag_value == TAG_INTEROP_OFFSET))
        {
          ssize_t
            offset;

          /*
            Descend into the EXIF/interoperability sub-IFD, saving where
            to resume in the current directory; also queue the next-IFD
            link stored after the last entry.
          */
          offset=(ssize_t) ((int) ReadProfileLong(endian,p));
          if (((size_t) offset < length) && (level < (MaxDirectoryStack-2)))
            {
              directory_stack[level].directory=directory;
              entry++;
              directory_stack[level].entry=entry;
              level++;
              directory_stack[level].directory=exif+offset;
              directory_stack[level].entry=0;
              level++;
              if ((directory+2+(12*number_entries)) > (exif+length))
                break;
              offset=(ssize_t) ((int) ReadProfileLong(endian,directory+2+(12*
                number_entries)));
              if ((offset != 0) && ((size_t) offset < length) &&
                  (level < (MaxDirectoryStack-2)))
                {
                  directory_stack[level].directory=exif+offset;
                  directory_stack[level].entry=0;
                  level++;
                }
            }
          break;
        }
    }
  } while (level > 0);
  exif_resources=DestroySplayTree(exif_resources);
  return(MagickTrue);
}
|
parallel.c | #include "parallel.h"
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#if TCI_USE_OPENMP_THREADS
int tci_parallelize(tci_thread_func func, void* payload,
                    unsigned nthread, unsigned arity)
{
    /*
     * OpenMP backend: run func on nthread threads, each with its own
     * tci_comm bound to one shared context.  With one (or zero) threads
     * the call degenerates to a direct invocation on tci_single.
     * Returns 0 on success or the tci_context_init error code.
     */
    if (nthread <= 1)
    {
        func(tci_single, payload);
        return 0;
    }

    tci_context* context;
    int ret = tci_context_init(&context, nthread, arity);
    if (ret != 0) return ret;

    #pragma omp parallel num_threads(nthread)
    {
        tci_comm comm;
        tci_comm_init(&comm, context,
                      nthread, (unsigned)omp_get_thread_num(), 1, 0);
        func(&comm, payload);
        /* every thread must be done with the comm before any tears it down */
        #pragma omp barrier
        /* NOTE(review): presumably tci_comm_destroy releases the shared
         * context once the last comm detaches -- confirm in its definition */
        tci_comm_destroy(&comm);
    }

    return 0;
}
#elif TCI_USE_OMPTASK_THREADS
int tci_parallelize(tci_thread_func func, void* payload,
                    unsigned nthread, unsigned arity)
{
    /*
     * OpenMP-task backend: one thread of the team invokes func with the
     * serial communicator; func is expected to create OpenMP tasks
     * itself.  arity is unused in this flavor.
     */
    #pragma omp parallel num_threads(nthread)
    {
        #pragma omp single
        func(tci_single, payload);
    }
    return 0;
}
#elif TCI_USE_PTHREADS_THREADS
/* per-thread launch packet handed to the pthread trampoline */
typedef struct
{
    tci_thread_func func;   /* user entry point */
    void* payload;          /* opaque argument forwarded to func */
    tci_context* context;   /* context shared by the whole team */
    unsigned nthread, tid;  /* team size and this thread's id */
} tci_thread_data;
/*
 * pthread trampoline: build this thread's communicator, run the user
 * function, then tear the communicator down.
 */
void* tci_run_thread(void* raw_data)
{
    tci_thread_data* data = (tci_thread_data*)raw_data;
    tci_comm comm;
    tci_comm_init(&comm, data->context, data->nthread, data->tid, 1, 0);
    data->func(&comm, data->payload);
    tci_comm_destroy(&comm);
    return NULL;
}
int tci_parallelize(tci_thread_func func, void* payload,
                    unsigned nthread, unsigned arity)
{
    /*
     * pthreads backend: spawn nthread-1 workers (ids 1..nthread-1) and
     * run the calling thread as id 0.  Returns 0 on success or the
     * failing tci_context_init/pthread_create error code.
     */
    if (nthread <= 1)
    {
        func(tci_single, payload);
        return 0;
    }

    tci_context* context;
    int ret = tci_context_init(&context, nthread, arity);
    if (ret != 0) return ret;

    /* VLAs: launch data must outlive the workers, so it lives here */
    pthread_t threads[nthread];
    tci_thread_data data[nthread];

    for (unsigned i = 1;i < nthread;i++)
    {
        data[i].func = func;
        data[i].payload = payload;
        data[i].context = context;
        data[i].nthread = nthread;
        data[i].tid = i;
        int ret = pthread_create(&threads[i], NULL, tci_run_thread, &data[i]);
        if (ret != 0)
        {
            /* join whatever was already started before bailing out
             * NOTE(review): context is not released on this path --
             * confirm whether tci_context_init's allocation leaks here */
            for (unsigned j = 1;j < i;j++) pthread_join(threads[j], NULL);
            return ret;
        }
    }

    /* the calling thread participates directly as id 0 */
    tci_comm comm0;
    tci_comm_init(&comm0, context, nthread, 0, 1, 0);
    func(&comm0, payload);

    for (unsigned i = 1;i < nthread;i++)
    {
        pthread_join(threads[i], NULL);
    }

    return tci_comm_destroy(&comm0);
}
#elif TCI_USE_WINDOWS_THREADS
//TODO
/* per-thread launch packet handed to the Win32 thread trampoline */
typedef struct
{
    tci_thread_func func;   /* user entry point */
    void* payload;          /* opaque argument forwarded to func */
    tci_context* context;   /* context shared by the whole team */
    unsigned nthread, tid;  /* team size and this thread's id */
} tci_thread_data;
/*
 * Win32 thread trampoline: build this thread's communicator, run the
 * user function, then tear the communicator down.
 */
DWORD WINAPI tci_run_thread(void* raw_data)
{
    tci_thread_data* data = (tci_thread_data*)raw_data;
    tci_comm comm;
    tci_comm_init(&comm, data->context, data->nthread, data->tid, 1, 0);
    data->func(&comm, data->payload);
    tci_comm_destroy(&comm);
    /* DWORD is an integer exit code, not a pointer: the previous
     * `return NULL;' relied on an implicit pointer-to-integer
     * conversion (an error in strict C/C++) */
    return 0;
}
int tci_parallelize(tci_thread_func func, void* payload,
                    unsigned nthread, unsigned arity)
{
    /*
     * Win32 backend: spawn nthread-1 worker threads (ids 1..nthread-1)
     * and run the calling thread as id 0.  Returns 0 on success, the
     * tci_context_init error code, or -1 when CreateThread fails.
     */
    if (nthread <= 1)
    {
        func(tci_single, payload);
        return 0;
    }

    tci_context* context;
    int ret = tci_context_init(&context, nthread, arity);
    if (ret != 0) return ret;

    HANDLE threads[nthread-1];
    tci_thread_data data[nthread-1];

    for (unsigned i = 0;i < nthread-1;i++)
    {
        data[i].func = func;
        data[i].payload = payload;
        data[i].context = context;
        data[i].nthread = nthread;
        data[i].tid = i+1;
        threads[i] = CreateThread(NULL, 0, tci_run_thread, &data[i], 0, NULL);
        if (!threads[i])
        {
            /* wait for the workers already launched, then bail
             * NOTE(review): context is not released and thread handles
             * are never CloseHandle'd on this path -- confirm */
            WaitForMultipleObjects(i, threads, TRUE, INFINITE);
            return -1;
        }
    }

    /* the calling thread participates directly as id 0 */
    tci_comm comm0;
    tci_comm_init(&comm0, context, nthread, 0, 1, 0);
    func(&comm0, payload);

    WaitForMultipleObjects(nthread-1, threads, TRUE, INFINITE);

    return tci_comm_destroy(&comm0);
}
#else // TCI_USE_TBB_THREADS, TCI_USE_DISPATCH_THREADS,
// TCI_USE_PPL_THREADS, single threaded
int tci_parallelize(tci_thread_func func, void* payload,
                    unsigned nthread, unsigned arity)
{
    /*
     * Fallback for backends with no implementation here (TBB, dispatch,
     * PPL, or single-threaded builds): invoke func once on the calling
     * thread with a stack-local communicator.
     * NOTE(review): the brace initializer must match tci_comm's field
     * order declared in parallel.h -- confirm before changing either.
     */
    tci_comm comm = {NULL, 1, 0, nthread, 0};
    func(&comm, payload);
    return 0;
}
#endif
/*
 * Initialize an iterator over the prime factors of n; drain it with
 * repeated calls to tci_next_prime_factor.
 */
void tci_prime_factorization(unsigned n, tci_prime_factors* factors)
{
    factors->n = n;
    // all this is necessary to appease the warning gods
    factors->sqrt_n = (unsigned)lrint(floor(sqrt(n)));
    factors->f = 2;
}
/*
 * Return the next (smallest remaining) prime factor of factors->n,
 * dividing it out of n; returns 1 once n is fully factored.  Plain
 * trial division: 2, 3, 5, 7 are special-cased, then every candidate up
 * to sqrt of the original n is tried (a composite candidate can never
 * divide n because its prime factors were already removed).
 */
unsigned tci_next_prime_factor(tci_prime_factors* factors)
{
    for (;factors->f <= factors->sqrt_n;)
    {
        if (factors->f == 2)
        {
            if (factors->n%2 == 0)
            {
                factors->n /= 2;
                return 2;
            }
            factors->f = 3;
        }
        else if (factors->f == 3)
        {
            if (factors->n%3 == 0)
            {
                factors->n /= 3;
                return 3;
            }
            factors->f = 5;
        }
        else if (factors->f == 5)
        {
            if (factors->n%5 == 0)
            {
                factors->n /= 5;
                return 5;
            }
            factors->f = 7;
        }
        else if (factors->f == 7)
        {
            if (factors->n%7 == 0)
            {
                factors->n /= 7;
                return 7;
            }
            factors->f = 11;
        }
        else
        {
            if (factors->n%factors->f == 0)
            {
                factors->n /= factors->f;
                return factors->f;
            }
            factors->f++;
        }
    }

    /* whatever remains has no factor <= sqrt(original n), hence prime */
    if (factors->n != 1)
    {
        unsigned tmp = factors->n;
        factors->n = 1;
        return tmp;
    }

    return 1;
}
#define TCI_USE_EXPENSIVE_PARTITION 0
#if TCI_USE_EXPENSIVE_PARTITION
/*
* Assumes base > 0 and power >= 0.
*/
/*
 * Integer exponentiation by squaring: returns base raised to power.
 * Assumes base > 0 and power >= 0; overflow is the caller's concern.
 */
static int ipow(int base, int power)
{
    int result = 1;
    int b = base;
    int e = power;
    while (e > 0)
    {
        if (e & 1) result *= b;
        b *= b;
        e >>= 1;
    }
    return result;
}
#endif
// Splits `nthread` threads into a 2-D grid nt1 x nt2 whose side lengths
// track the work ratio work1:work2, subject to per-dimension caps max1/max2.
// The product nt1*nt2 must equal nthread (asserted at the bottom), so
// nthread's prime factors are distributed between the two dimensions.
void tci_partition_2x2(unsigned nthread,
uint64_t work1, unsigned max1,
uint64_t work2, unsigned max2,
unsigned* nt1, unsigned* nt2)
{
// Clamp the caps into [1, nthread] so the logic below can trust them.
max1 = TCI_MIN(TCI_MAX(max1, 1), nthread);
max2 = TCI_MIN(TCI_MAX(max2, 1), nthread);
if (nthread < 4)
{
// Too few threads to split: give them all to the dimension with the
// tighter competing cap, breaking ties by the larger workload.
if (max2 < max1 || (max1 == max2 && work1 >= work2))
{
*nt1 = nthread;
*nt2 = 1;
}
else
{
*nt1 = 1;
*nt2 = nthread;
}
return;
}
tci_prime_factors factors;
tci_prime_factorization(nthread, &factors);
#if !TCI_USE_EXPENSIVE_PARTITION
// Cheap path: greedily hand each prime factor to the dimension with the
// larger remaining per-thread work, respecting the caps where possible.
unsigned num1 = 1;
unsigned num2 = 1;
unsigned f;
while ((f = tci_next_prime_factor(&factors)) > 1)
{
// NOTE(review): when num2*f would exceed max2 the factor falls through
// to dimension 1 even if num1*f also exceeds max1 — the caps are best
// effort here, not hard guarantees.
if ((work2 >= work1 || num1*f > max1) && num2*f <= max2)
{
work2 /= f;
num2 *= f;
}
else
{
work1 /= f;
num1 *= f;
}
}
*nt1 = num1;
*nt2 = num2;
#else
/*
* Eight distinct prime factors handles all numbers up to 223092870
*/
// Expensive path: enumerate every divisor split x*y == nthread and keep
// the one minimizing |x*work2 - y*work1| (i.e. the most balanced grid).
int fact[8];
int mult[8];
int nfact = 1;
fact[0] = tci_next_prime_factor(&factors);
mult[0] = 1;
int f;
while ((f = tci_next_prime_factor(&factors)) > 1)
{
// Collapse repeated primes into (factor, multiplicity) pairs.
if (f == fact[nfact-1])
{
mult[nfact-1]++;
}
else
{
nfact++;
fact[nfact-1] = f;
mult[nfact-1] = 1;
}
}
// ntake[i] = how many copies of fact[i] go to dimension 1; the loop below
// iterates ntake like a mixed-radix odometer over all divisor splits.
int ntake[8] = {0};
int64_t min_diff = INT64_MAX;
bool done = false;
while (!done)
{
int x = 1;
int y = 1;
for (int i = 0;i < nfact;i++)
{
x *= ipow(fact[i], ntake[i]);
y *= ipow(fact[i], mult[i]-ntake[i]);
}
// NOTE(review): x*work2 promotes to uint64_t; for very large workloads
// the subtraction can wrap before llabs() sees it — confirm ranges.
int64_t diff = llabs(x*work2 - y*work1);
if (diff < min_diff)
{
min_diff = diff;
*nt1 = x;
*nt2 = y;
}
// Advance the odometer; terminating once the highest digit wraps.
for (int i = 0;i < nfact;i++)
{
if (++ntake[i] > mult[i])
{
ntake[i] = 0;
if (i == nfact-1) done = true;
else continue;
}
break;
}
}
#endif
assert((*nt1)*(*nt2) == nthread);
}
|
tasks.c | /*
testing omp constructs to represents tasks
https://www.openmp.org//wp-content/uploads/sc13.tasking.ruud.pdf
compile:
$ clang tasks.c -fopenmp=libomp -o tasks
*/
#include <stdio.h>
#include <unistd.h>
/* Forever prints an incrementing counter tagged "task1", pausing 5 s
 * between lines.  Never returns. */
void task1(){
    for (int count = 0; ; ++count) {
        printf("task1 - %d\n", count);
        sleep(5);
    }
}
/* Forever prints an incrementing counter tagged "task2", pausing 4 s
 * between lines.  Never returns. */
void task2(){
    for (int count = 0; ; ++count) {
        printf("task2 - %d\n", count);
        sleep(4);
    }
}
// Demonstrates OpenMP tasking: one thread of the team creates two tasks
// that run task1()/task2() concurrently on other team members.
// NOTE: both tasks loop forever, so the taskwait below never completes and
// the trailing printfs are unreachable in practice.
int main(int argc, char *argv[])
{
#pragma omp parallel
{
// Only one thread executes this region and spawns the tasks; the rest
// of the team picks the tasks up.
#pragma omp single
{
printf("A ");
#pragma omp task
{
//printf("car ");
task2();
}
#pragma omp task
{
//printf("race ");
task1();
}
// Blocks until both child tasks finish (here: forever).
#pragma omp taskwait
printf("is fun to watch ");
}
} // End of parallel region
printf("\n");
return(0);
}
train2.c | #define _GNU_SOURCE
#include <syscall.h>
#include <sched.h>
#include "graph.h"
#include "mainFunctions.h"
#include "powerperformacetracking.h"
#include "print.h"
#include <stdlib.h>
#include<unistd.h>
#define NO_OF_ARGS 2
#define REPEAT 25
long long iters[8];
struct timeval start, end;
// We define all additional paramemter here
// Disabled stub: the commented-out body pinned each OpenMP thread to the
// core matching its thread id via sched_setaffinity().  Currently a no-op;
// re-enable the block below to restore per-thread CPU pinning.
void setaffinity() {
/* #pragma omp parallel
{
cpu_set_t newcpu;
int threadid = omp_get_thread_num();
CPU_ZERO(&newcpu);
CPU_SET ( threadid , &newcpu) ;
int __t = sched_setaffinity ( syscall ( SYS_gettid ) , sizeof ( newcpu ) , &newcpu ) ;
assert(__t == 0);
}
*/
}
/*
 * Benchmark kernel: for every node of G, walks a two-hop neighborhood and
 * tallies (into tShared) how often a randomly assigned membership value
 * lands in the upper half of the node-id range.  The sweep is repeated
 * REPEAT times inside a power-tracking window; `id` tags the CSV output
 * file ("multiple_<id>.csv") and the elapsed time is reported via
 * printTiming() using the global start/end timevals.
 *
 * Fixes vs. the original:
 *  - the membership array is allocated with sizeof(node_t), not sizeof(int)
 *    (the old `(int*)malloc(n*sizeof(int))` under-allocates whenever node_t
 *    is wider than int), and the allocation is NULL-checked;
 *  - the unused per-thread `threadid` local was removed.
 */
void updateMultipleArrayPerIteration(graph *G, int id) {
    printf("The update multiple Array %d \n", id);
    node_t *G_member = (node_t *)malloc(G->numNodes * sizeof(node_t));
    if (G_member == NULL) {
        fprintf(stderr, "updateMultipleArrayPerIteration: out of memory\n");
        return;
    }
    srand(0); /* deterministic membership assignment across runs */
    int i;
    for (i = 0; i < G->numNodes; i++) {
        G_member[i] = rand() % G->numNodes;
    }
    char title[50];
    sprintf(title, "multiple_%d.csv", id);
    gettimeofday(&start, NULL);
    inittracking(title);
    int tShared = 0;
    for (int abc = 0; abc < REPEAT; abc++) {
        #pragma omp parallel
        {
            int t = 0; /* per-thread tally, merged atomically below */
            #pragma omp for schedule(dynamic, 1024)
            for (node_t u1 = 0; u1 < G->numNodes; u1++)
                for (edge_t j_idx = G->begin[u1]; j_idx < G->begin[u1+1]; j_idx++) {
                    node_t j = G->node_idx[j_idx];
                    /* NOTE(review): both inner loops read G_member[j], which
                     * is invariant over k_idx — presumably intentional for
                     * this synthetic workload; confirm before changing. */
                    for (edge_t k_idx = G->begin[j]; k_idx < G->begin[j+1]; k_idx++) {
                        node_t k = G_member[j];
                        if (k > G->numNodes/2) {
                            t++;
                        }
                    }
                    node_t j_comp = (G->numNodes - (j+1));
                    for (edge_t k_idx = G->begin[j_comp]; k_idx < G->begin[j_comp+1]; k_idx++) {
                        node_t k = G_member[j];
                        if (k > G->numNodes/2) {
                            t++;
                        }
                    }
                }
            #pragma omp atomic
            tShared += t;
        }
        tShared /= 1000; /* keeps the accumulator bounded across repeats */
    }
    printf("TSHared %d\n", tShared);
    endtracking();
    gettimeofday(&end, NULL);
    printTiming(ALGO_KERNEL,((end.tv_sec - start.tv_sec)*1000 + ((double)(end.tv_usec - start.tv_usec))/1000));
    free(G_member);
}
#define numTimes 7
// Entry point for the benchmark: loads the graph named by argv[1]/argv[2]
// and runs the kernel numTimes times with a 2 s cool-down between runs.
// NOTE(review): argc is not validated before argv[1]/argv[2] are used —
// missing arguments crash inside readGraph(); confirm the caller checks.
int runalgo(int argc,char** argv) {
int i;
//setaffinity();
graph* G = readGraph(argv[1], argv[2]);
for(i = 0;i< numTimes; i++) {
printf("Run %d \n", i);
updateMultipleArrayPerIteration(G,i);
sleep(2);
}
return 0;
}
// Intentionally empty: placeholder required by the harness interface.
// NOTE(review): a C `inline` definition with no extern declaration can fail
// to emit a symbol for external callers — confirm no other TU links to it.
inline void kernel(graph *G) {
}
|
driver.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <stdint.h>
#include <unistd.h>
#include <stdlib.h>
#include <omp.h>
#include <mpi.h>
#include <string.h>
#include "TT06Gates.h"
void HPM_Start(char *);
void HPM_Stop(char *);
typedef struct { int nCores, nThreads; struct { int coreID, threadID;} map[64];} threadInfo;
typedef struct { double nonGateMin, nonGateMax, gateMin, gateMax;} TIME;
int perm[24][4] = {{0,1,2,3}, {0,1,3,2}, {0,2,1,3}, {0,2,3,1}, {0,3,1,2}, {0,3,2,1},
{1,0,2,3}, {1,0,3,2}, {1,2,0,3}, {1,2,3,0}, {1,3,0,2}, {1,3,2,0},
{2,0,1,3}, {2,0,3,1}, {2,1,0,3}, {2,1,3,0}, {2,3,0,1}, {2,3,1,0},
{3,0,1,2}, {3,0,2,1}, {3,1,0,2}, {3,1,2,0}, {3,2,0,1}, {3,2,1,0}};
//#define BGQ
#ifdef BGQ
#include <spi/include/l1p/pprefetch.h>
#include <spi/include/l1p/sprefetch.h>
#endif
#include "TT06Func.h"
void update_nonGate(void *ptr, double dt, struct CellTypeParms *p, int nCells, int *cellTypeVector, const double *VM, int offset, double *gates[19], double *dVdt);
void update_nonGate_v1(void *ptr, double dt, struct CellTypeParms *p,int nCells, int *cellTypeVector, const double *VM, int offset, double *gates[19], double *dVdt);
double initState(double *states,double *gates, int cellType);
char *getStateName(int index);
void initExp();
void fastLogInit();
//void initArray();
typedef void (*UPDATENONGATE)(void *ptr, double dt, struct CellTypeParms *p,int nCells, int *cellTypeVector, const double *VM, int offset, double *gates[19], double *dVdt);
typedef void (*UPDATEGATE)(double dt, int nCells, double *VM, double *g, double *mhu_a, double *tauR_a) ;
//static double mhu[13][50];
//static double tauR[13][50];
static double *mhu[13];
static double *tauR[13];
static double dt=0.01;
static int nonGatesFlag=0;
static int gatesFlag=0;
static int map[12]={0,1,2,3,4,5,6,7,8,9,10,11};
static int gateSwap=0;
static double mhuB[13*50];
static double tauRB[13*50];
/*
 * Verifies that map[0..11] contains no repeated entry (i.e. is a candidate
 * permutation of 0..11).  On the first duplicate the offending index is
 * printed and 1 is returned; otherwise returns 0.
 * Entries are assumed to lie in [0, 12).
 */
int mapBad(int *map)
{
    int seen[12] = {0};
    for (int i = 0; i < 12; i++)
    {
        if (seen[map[i]])
        {
            printf("%d\n", i);
            return 1;
        }
        seen[map[i]] = 1;
    }
    return 0;
}
// Builds the OpenMP-thread -> (coreID, threadID) table used by
// parallelSection().  Threads are dealt round-robin across nCores cores
// (4 by default, 16 on BG/Q).
// NOTE(review): as written, threadID = (ompID/nCores)*nCores + ompID%nCores
// is always exactly ompID — if a squad-major renumbering was intended, the
// formula does not achieve it; confirm.
threadInfo getThreadInfo()
{
int nThreads = omp_get_max_threads();
int nCores= 4;
#ifdef BGQ
nCores=16;
#endif
threadInfo info ;
info.nCores=nCores;
info.nThreads=nThreads;
for (int ompID=0;ompID<nThreads;ompID++)
{
info.map[ompID].coreID = ompID%nCores;
info.map[ompID].threadID = (ompID/nCores) *nCores + info.map[ompID].coreID;
}
return info;
}
/*
 * Initializes every cell on this node to the given cell type: fills
 * cellTypeVector, the membrane voltage Vm[i], and the 19 state/gate arrays
 * g[0..18][i] from the model's initState().  When the global gateSwap flag
 * is set, gate equations (indices 7..18) are stored permuted through the
 * global `map` so storage order matches the swapped compute order.
 *
 * Fix vs. the original: removed the unused local
 * `int ompID = omp_get_thread_num();` (its value was never read).
 */
void init(int cellType, int nCellsOnNode, int *cellTypeVector, double **g, double *Vm)
{
   for (int i=0;i<nCellsOnNode;i++)
   {
      double state[nStateVar];
      double gate[1];
      double vm=initState(state,gate,cellType);
      cellTypeVector[i] = cellType;
      Vm[i] = vm;
      int k;
      for (int j=0;j<19;j++)
      {
         /* Gates start at equation 7; remap them when gateSwap is active. */
         if (gateSwap && j>6) k = 7+map[j-7]; else k = j;
         g[j][i]=state[k];
      }
      //if (gateSwap) k = 7+map[11]; else k = 18;
      // g[19][i]=state[k];
   }
}
/*
 * Loads the 13 mhu/tauR coefficient rows from ../coef.data.  Each table
 * entry is two text lines of the form "name l m" followed by 25 doubles:
 * the first line fills mhu[i], the second fills tauR[i].
 *
 * Fixes vs. the original: the fopen result is NULL-checked (the old code
 * dereferenced a NULL FILE* when the file was missing), fgets/sscanf
 * results are validated so a truncated or malformed file fails loudly, and
 * the %s conversion is width-limited to the size of name[16].
 */
void readData()
{
   char line[1024],name[16];
   int l,m;
   FILE *file=fopen("../coef.data","r");
   if (file == NULL)
   {
      fprintf(stderr, "readData: cannot open ../coef.data\n");
      exit(1);
   }
   for (int i=0;i<13;i++)
   {
      double *a;
      int k;
      /* mhu row */
      if (fgets(line,1023,file) == NULL)
      {
         fprintf(stderr, "readData: ../coef.data truncated at entry %d\n", i);
         exit(1);
      }
      a = mhu[i];
      k=sscanf(line,"%15s %d %d %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf",
      name,&l,&m,a+0, a+1,a+2,a+3,a+4,a+5,a+6,a+7,a+8,a+9,a+10,a+11,a+12,a+13,a+14,a+15,a+16,a+17,a+18,a+19,a+20,a+21,a+22,a+23,a+24);
      if (k != 28)
      {
         fprintf(stderr, "readData: malformed mhu line for entry %d (parsed %d fields)\n", i, k);
         exit(1);
      }
      /* tauR row */
      if (fgets(line,1023,file) == NULL)
      {
         fprintf(stderr, "readData: ../coef.data truncated at entry %d\n", i);
         exit(1);
      }
      a = tauR[i];
      k=sscanf(line,"%15s %d %d %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf",
      name,&l,&m,a+0, a+1,a+2,a+3,a+4,a+5,a+6,a+7,a+8,a+9,a+10,a+11,a+12,a+13,a+14,a+15,a+16,a+17,a+18,a+19,a+20,a+21,a+22,a+23,a+24);
      if (k != 28)
      {
         fprintf(stderr, "readData: malformed tauR line for entry %d (parsed %d fields)\n", i, k);
         exit(1);
      }
   }
   fclose(file);
}
// Runs maxLoop time steps of the reaction kernel: each OpenMP thread updates
// its slice of the non-gate equations, and each core's squad of threads
// splits the 12 gate equations.  Per-thread wall times are collected in
// cpuTimes (even slots = nonGate, odd = gate) and reduced to min/max in the
// returned TIME; if `file` is non-NULL a per-thread timing table is appended.
// NOTE(review): cpuTimes[128] supports at most 64 threads — no bounds check.
TIME parallelSection(threadInfo *info, UPDATENONGATE updateNonGateFunc, UPDATEGATE updateGateFuncs[], double dt, int maxLoop, int nCells, int *cellTypeVector, double *Vm, double *g[19], double *dVdt, FILE *file)
{
// NOTE(review): the 4th entry's 0.2724 differs from the 2.724 in the other
// three — looks like a dropped digit; confirm against the TT06 parameters.
struct CellTypeParms cellParms[4]={{0,0,2.724,0.392,0.073,0.0},{0,0,2.724,0.098,0.294,0.0},{ 0,0,2.724, 0.392, 0.294, 0.0 },{0,0,0.2724,0.098,0.294,0.0}};
#ifdef BGQ
HPM_Start("Reaction");
//vprof_start();
#endif
double cpuTimes[128];
for(int i=0;i<128;i++) cpuTimes[i]=0.0;
// NOTE(review): this outer `loop` is never used; the region below declares
// its own shadowing `loop`.
int loop = 0;
#pragma omp parallel
{
// Per-thread decomposition: cells are split per thread for the non-gate
// update and per core for the gate update; each core's squad divides the
// 12 gate equations among its threads.
int nThreads = info ->nThreads;
int nCores = info ->nCores;
int nSquad = nThreads/nCores;
int nCellPerCore = nCells/nCores;
int nCellPerThread = nCells/nThreads;
int ompID = omp_get_thread_num();
int coreID = info->map[ompID].coreID;
int offsetCore = coreID*nCellPerCore;
int threadID = info->map[ompID].threadID;
int offsetThread = threadID*(nCellPerThread);
int squadID = threadID / nCores;
int nEq = 12/nSquad;
int offsetGEq = squadID* nEq;
// NOTE(review): squadRank is computed but never used.
int squadRank = threadID % nSquad;
int loop;
double time=0.0;
for (loop=0;loop<maxLoop;loop++)
{
double t0,t1;
if (nonGatesFlag)
{
// Barrier so the timing below measures this phase alone.
#pragma omp barrier
t0 = omp_get_wtime();
updateNonGateFunc(NULL,dt, cellParms, nCellPerThread, cellTypeVector, Vm+offsetThread, offsetThread, g, dVdt+offsetThread);
t1 = omp_get_wtime();
cpuTimes[2*ompID]+= t1-t0;
}
if (gatesFlag)
{
#pragma omp barrier
t0 = omp_get_wtime();
for (int i=0;i<nEq;i++)
{
int eqx = offsetGEq+i;
int eq = map[eqx];
int gateIndex;
// gateSwap stores gates in compute (swapped) order, so index by eqx.
if (gateSwap) gateIndex = eqx ; else gateIndex = eq;
updateGateFuncs[eq](dt, nCellPerCore, Vm+offsetCore, g[gateIndex+7]+offsetCore, mhu[eq], tauR[eq]);
}
t1 = omp_get_wtime();
cpuTimes[2*ompID+1]+= t1-t0;
}
time+=dt;
}
#pragma omp barrier
}
#ifdef BGQ
//vprof_stop();
HPM_Stop("Reaction");
#endif
// Reduce per-thread accumulations to min/max per phase.
int nThreads = info ->nThreads;
TIME time;
time.gateMax =0.0;
time.gateMin =10000.0;
time.nonGateMax =0.0;
time.nonGateMin =10000.0;
for(int ompID=0;ompID<nThreads;ompID++)
{
double nonGateTime = cpuTimes[2*ompID];
double gateTime = cpuTimes[2*ompID+1];
if (nonGateTime > time.nonGateMax) time.nonGateMax = nonGateTime;
if ( gateTime > time.gateMax ) time.gateMax = gateTime;
if (nonGateTime < time.nonGateMin) time.nonGateMin = nonGateTime;
if ( gateTime < time.gateMin ) time.gateMin = gateTime;
}
if (file != NULL)
{
// cT converts accumulated seconds to microseconds per time step.
double cT = 1e6/maxLoop;
fprintf(file,"#ompID coreID nonGate gate\n");
for(int ompID=0;ompID<nThreads;ompID++)
{
fprintf(file,"%6d %6d %12.6f %12.6f\n",ompID,ompID%16,cT*cpuTimes[2*ompID],cT*cpuTimes[2*ompID+1]);fflush(stdout);
}
fprintf(file,"end_of_data\n"); fflush(file);
}
return time;
}
// Benchmark driver: parses flags, sets up 32-byte-aligned state arrays for
// nCellsOnNode cells, then either times one parallelSection() run or (with
// -nMinSteps) random-search permutations of the gate-equation `map` to
// minimize the worst-thread gate time.  -compare cross-checks the _v1
// kernels against the reference implementations.
// NOTE(review): Kernel_GetJobID() is a BG/Q system call yet is invoked
// unconditionally (outside any #ifdef BGQ) — this will not link elsewhere.
// mkdir() also needs <sys/stat.h>, which is not included here.
int main(int argc, char **argv)
{
int compare=0;
int maxLoop = 100000;
int nMinSteps = 0;
int nCellsOnNode=4096;
int jobId=Kernel_GetJobID();
printf("jobId=%d\n",jobId);
int mode = 0775;
char dirname[256];
sprintf(dirname,"jobId.%d",jobId);
// All output goes into a per-job directory.  NOTE(review): both results
// are assigned to rc but never checked.
int rc;
rc = mkdir(dirname, mode);
rc = chdir(dirname);
int cellType = 100;
// Carve the static mhuB/tauRB buffers into 13 rows of 50 coefficients.
mhu[0] = mhuB;
tauR[0] = &tauRB[0];
for (int i=1;i<13;i++)
{
mhu[i] = mhu[i-1]+50;
tauR[i] =tauR[i-1]+50;
}
FILE *file=fopen("time.data","w");
// Command-line parsing; value-taking flags consume the next argv entry.
for (int i=1;i<argc;i++)
{
if (strcmp(argv[i],"-nonGates") ==0) nonGatesFlag=1;
if (strcmp(argv[i],"-gates") ==0) gatesFlag=1;
if (strcmp(argv[i],"-n") ==0) nCellsOnNode = atol(argv[++i]);
if (strcmp(argv[i],"-maxLoop") ==0) maxLoop = atol(argv[++i]);
if (strcmp(argv[i],"-cellType") == 0) cellType = atol(argv[++i]);
if (strcmp(argv[i],"-compare") == 0) compare=1;
if (strcmp(argv[i],"-nMinSteps") == 0) nMinSteps = atol(argv[++i]);
if (strcmp(argv[i],"-map") == 0)
{
for (int j=0;j<12;j++) map[j] = atol(argv[++i]);
}
if (strcmp(argv[i],"-h") ==0)
{
printf("usage: driver [-nonGates] [-gates] [-compare] [-nMinSteps <steps>] [-maxLoop <#TimeSteps>] [-n <#cells>] [-map 0 1 2 3 4 5 6 7 8 9 10 11] [-cellType <100|101|102>]>\n");
exit(0);
}
}
// Cell types are entered as 100..102; internally 0-based.
cellType -= 100;
int myid;
MPI_Init(&argc,&argv);
MPI_Comm_rank(MPI_COMM_WORLD, &myid);
UPDATEGATE updateFuncs0[]={ update_mGate, update_hGate, update_jGate, update_Xr1Gate, update_Xr2Gate, update_XsGate, update_rGate, update_dGate,
update_fGate, update_f2Gate, update_jLGate, update_s0Gate, update_s1Gate} ;
UPDATEGATE updateFuncs1[]={ update_mGate_v1, update_hGate_v1, update_jGate_v1, update_Xr1Gate_v1, update_Xr2Gate_v1, update_XsGate_v1, update_rGate_v1, update_dGate_v1,
update_fGate_v1, update_f2Gate_v1, update_jLGate_v1, update_s0Gate_v1, update_s1Gate_v1} ;
// Non-endo cell types use the alternate s-gate (slot 12) in place of s0.
if (cellType != 0)
{
updateFuncs0[11] = update_s1Gate;
updateFuncs1[11] = update_s1Gate_v1;
mhu[11] = mhu[12];
tauR[11] = tauR[12];
}
// create a aligned buffer and offset
uint64_t sizeBuffer = 22*(nCellsOnNode+63)*sizeof(double);
uint64_t buffer = (uint64_t)malloc(sizeBuffer);
uint64_t offset = 4*((nCellsOnNode+3)/4);
double *start = (double *)(32*((buffer+31)/32));
// Make sure the gates (g), nonGate (cell), voltage (Vm) and dVm/dt (dVdt) are 32 byte aligned
double *g[20]; for (int i=0;i<20;i++) g[i] = start + i*offset;
double *Vm =start+20*offset;
double *dVdt =start+21*offset;
int cellTypeVector[offset];
#if BGQ
#pragma omp parallel
{
L1P_SetStreamPolicy(L1P_stream_confirmed);
L1P_SetStreamAdaptiveMode(0);
L1P_SetStreamDepth(2);
}
#endif
// Build lookup tables / constants, then load the gate coefficients.
initExp();
fastLogInit();
initCnst();
initNonGateCnst();
readData();
threadInfo info = getThreadInfo();
double sum0[19];
// Reference pass: record per-equation mean state for later comparison.
if (compare)
{
init(cellType,nCellsOnNode, cellTypeVector,g, Vm);
parallelSection(&info, update_nonGate, updateFuncs0, dt, maxLoop, nCellsOnNode, cellTypeVector, Vm, g, dVdt,NULL);
for (int eq =0;eq<19;eq++)
{
sum0[eq]=0;
for (int i=0;i<nCellsOnNode;i++) sum0[eq]+=g[eq][i]; sum0[eq] /= nCellsOnNode;
}
}
printf("nMinStep=%d\n",nMinSteps);
if (nMinSteps > 1)
{
// Random-restart local search: repeatedly pick 4 distinct map slots and
// try all 24 permutations of their values, keeping any that lowers the
// worst-thread gate time.
int i,j,m;
init(cellType,nCellsOnNode, cellTypeVector,g, Vm);
TIME time = parallelSection(&info, update_nonGate_v1, updateFuncs1, dt, maxLoop, nCellsOnNode, cellTypeVector, Vm, g, dVdt,NULL);
double gateTimeBest = time.gateMax;
double cT = 1e6/maxLoop;
printf("%8d %8d",0,0);
printf(" %12.6f %12.6f",cT*time.gateMin,cT*time.gateMax);
for (int k=0;k<12;k++) {printf(" %2d",map[k]); } printf("\n"); fflush(stdout);
for (int step=0;step<nMinSteps;step++)
{
int cnt =0;
int flag[] ={ 0,0,0,0,0,0,0,0,0,0,0,0};
int list[4];
// Draw 4 distinct slot indices in [0,12).
while (cnt< 4)
{
int ii = 12 * drand48();
if (ii == 12) continue;
if (flag[ii] == 0) list[cnt++]=ii;
flag[ii] = 1;
}
int listValues[4];
for (int k=0;k<4;k++) listValues[k] = map[list[k]];
// perm[0] is the identity, so start at l=1.
for (int l=1;l<24;l++)
{
for (int k=0;k<4;k++) map[list[k]] = listValues[perm[l][k]];
if (mapBad(map))
{
printf("bad %d %d %d %d\n",list[0],list[1],list[2],list[3]);
for (int k=0;k<12;k++) {printf(" %2d",map[k]); } printf("\n"); fflush(stdout);
exit(0);
}
init(cellType,nCellsOnNode, cellTypeVector,g, Vm);
TIME time = parallelSection(&info, update_nonGate_v1, updateFuncs1, dt, maxLoop, nCellsOnNode, cellTypeVector, Vm, g, dVdt,NULL);
if (time.gateMax < gateTimeBest)
{
// Improvement: log the new best map (rewinding so time.data
// always holds only the latest winner), then re-run to capture
// the per-thread table.  NOTE(review): this inner `TIME time`
// shadows the one above.
gateTimeBest = time.gateMax;
rewind(file);
printf("%8d %8d",step,l);
printf(" %12.6f %12.6f",cT*time.gateMin,cT*time.gateMax);
for (int k=0;k<12;k++) {printf(" %2d",map[k]); } printf("\n"); fflush(stdout);
fprintf(file,"#"); for (i=0;i<argc;i++) fprintf(file, "%s ", argv[i]); fprintf(file,"\n");
fprintf(file,"# new map=");for(int i=0;i<12;i++) fprintf(file," %2d",map[i]); fprintf(file,"\n");
TIME time = parallelSection(&info, update_nonGate_v1, updateFuncs1, dt, maxLoop, nCellsOnNode, cellTypeVector, Vm, g, dVdt,file);
fflush(stdout);
}
else
{
for (int k=0;k<4;k++) map[list[k]] = listValues[k]; //Undo change
}
}
}
}
else
{
// Single timed run with per-thread table written to time.data.
double cT = 1e6/maxLoop;
init(cellType,nCellsOnNode, cellTypeVector,g, Vm);
TIME time = parallelSection(&info, update_nonGate_v1, updateFuncs1, dt, maxLoop, nCellsOnNode, cellTypeVector, Vm, g, dVdt,file);
printf("Time per time-step (usec)\n");
printf("nonGate(min/max) = %12.6f/%12.6f",cT*time.nonGateMin,cT*time.nonGateMax);
printf(" Gate(min/max) = %12.6f/%12.6f\n",cT*time.gateMin,cT*time.gateMax);
}
// Compare the _v1 results against the reference means captured above.
if (compare)
{
printf("\n********************************\n");
printf("g0 = gSNorm[eq][0]\n");
printf("g1 = gSimd[eq][0]\n");
printf("ave = 1 -0.5*(<g0>+<g1>)/g1\n");
printf("diff = <g1>-<g1>)/<g0>\n");
printf("%2s %-9s %9s %9s %9s %15s %9s\n","eq","eq Name","g1","<g0>","<g1>","ave ","diff");
for (int eq =0;eq<19;eq++)
{
double sum1=0; for (int i=0;i<nCellsOnNode;i++) sum1+=g[eq][i]; sum1 /= nCellsOnNode;
double sum = g[eq][0];
printf("%2d %-9s %10.3e %10.3e %10.3e error=%9.2e %9.2e\n",eq,getStateName(eq), sum,sum0[eq],sum1,1.0-0.5*(sum0[eq]+sum1)/sum,(sum1-sum0[eq])/sum0[eq]);
}
}
MPI_Finalize();
}
|
GB_unaryop__identity_int8_bool.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_int8_bool
// op(A') function: GB_tran__identity_int8_bool
// C type: int8_t
// A type: bool
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
bool
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
int8_t z = (int8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = (int8_t) Ax, element-wise: applies the identity operator with a
// bool -> int8_t typecast across anz entries, split statically over
// nthreads OpenMP threads.  Returns GrB_NO_VALUE when this operator/type
// combination is compiled out via GB_DISABLE.
GrB_Info GB_unop__identity_int8_bool
(
int8_t *restrict Cx,
const bool *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
// GB_CAST_OP expands to: Cx [p] = (int8_t) Ax [p]
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = (int8_t) A': transpose with the same identity-op/typecast as above.
// The actual bucket-transpose loop lives in the shared template
// "GB_unaryop_transpose.c", instantiated here via the GB_* macros; this is
// phase 2 (the fill pass) using the row counts computed in phase 1.
GrB_Info GB_tran__identity_int8_bool
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
finalOMP.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>
#include <omp.h>
#define NMAX 75000000
static double N[NMAX];
static int lt[NMAX];
static int gt[NMAX];
static double local[NMAX];
/* Prints the first n entries of the global array N as "[a, b, ...]". */
void printArray(int n){
    printf("[");
    for (int idx = 0; idx < n; idx++){
        if (idx == 0){
            printf("%f", N[idx]);
        } else {
            printf(", %f", N[idx]);
        }
    }
    printf("]\n");
}
/* Returns a pseudo-random double in [low, high], scaled from rand(). */
double drand ( double low, double high )
{
    const double span = high - low;
    return ((double)rand() * span) / (double)RAND_MAX + low;
}
/* Fills the first n slots of the global array N with random values in
 * [0, 1000] drawn via drand(). */
void fillArrayRandom(int n){
    int idx;
    for (idx = 0; idx < n; idx++){
        N[idx] = drand(0, 1000);
    }
}
/*
 * Lomuto partition of N[p..r] (inclusive) around the pivot N[r].
 * Rearranges the slice in place and returns the pivot's final index.
 */
int partition(int p, int r){
    const double pivot = N[r];
    int boundary = p - 1;
    for (int scan = p; scan < r; scan++){
        if (N[scan] <= pivot){
            boundary++;
            double tmp = N[boundary];
            N[boundary] = N[scan];
            N[scan] = tmp;
        }
    }
    double tmp = N[boundary + 1];
    N[boundary + 1] = N[r];
    N[r] = tmp;
    return boundary + 1;
}
/* Recursive sequential quicksort of N[p..r] (inclusive). */
void quickSortHelper(int p, int r){
    if (p >= r) return;
    int mid = partition(p, r);
    quickSortHelper(p, mid - 1);
    quickSortHelper(mid + 1, r);
}
/* Sorts N[0..n-1] with the sequential quicksort; returns elapsed seconds. */
double sequentialQuickSort(int n){
    const double started = omp_get_wtime();
    quickSortHelper(0, n - 1);
    return omp_get_wtime() - started;
}
/* In-place insertion sort of N[p..r] (inclusive). */
void insertionSortHelper(int p, int r){
    for (int pos = p + 1; pos <= r; pos++){
        double value = N[pos];
        int slot = pos - 1;
        while (slot >= p && N[slot] > value){
            N[slot + 1] = N[slot];
            slot--;
        }
        N[slot + 1] = value;
    }
}
/* In-place inclusive prefix sum over arr[p..r]; arr[p] is left untouched. */
void prefixSum(int arr[], int p, int r){
    for (int idx = p + 1; idx <= r; idx++){
        arr[idx] += arr[idx - 1];
    }
}
/* Floor of log base 2 of n (returns 0 for n <= 1). */
int log_2(int n){
    int bits = 0;
    for (n >>= 1; n != 0; n >>= 1){
        bits++;
    }
    return bits;
}
// Parallel inclusive prefix sum over the global lt[] and gt[] flag arrays,
// restricted to the subrange [p, r].  Classic two-phase scan: the up-sweep
// accumulates partial sums at strides 2^1..2^k, then the down-sweep
// propagates them into the intermediate (odd-multiple) positions.
// NOTE(review): the structure assumes the power-of-two sweep covers the
// range; confirm behavior for lengths that are not powers of two.
void parallelPrefixSum(int p, int r){
int len = r-p+1;
int shift, j, h;
int k = log_2(len);
// Up-sweep: at level h, position p + j*2^h - 1 absorbs the partial sum
// half a stride below it.
for(h=1; h<k+1;h++){
shift = 1<<h;
#pragma omp parallel
{
#pragma omp for schedule(static)
for(j=1; j<(len/shift)+1;j++){
lt[p+j*shift-1]+=lt[p+j*shift-(shift/2)-1];
gt[p+j*shift-1]+=gt[p+j*shift-(shift/2)-1];
}
}
}
// Down-sweep: odd-numbered stride positions pick up the sum of the
// preceding full stride, completing the inclusive scan.
for(h=k; h>-1;h--){
shift = 1<<h;
#pragma omp parallel
{
#pragma omp for schedule(static)
for(j=2; j<(len/shift)+1;j++){
if(j%2==1){
lt[p+j*shift-1]+=lt[p+j*shift-shift-1];
gt[p+j*shift-1]+=gt[p+j*shift-shift-1];
}
}
}
}
}
// Parallel partition of N[p..r] around the pivot N[r].
// Phase 1 snapshots the slice into local[] and marks each element in the
// global flag arrays: lt[i]=1 if it belongs left of the pivot, gt[i]=1
// otherwise.  A parallel prefix sum then turns the flags into destination
// offsets, the pivot is dropped at p+lt[r], and phase 2 scatters every
// element to its computed slot.  Returns the pivot's final index.
int parallelPartition(int p, int r){
double key=N[r];
int i,j;
double temp;
#pragma omp parallel
{
// Phase 1a: clear flags and snapshot N so the scatter below can read
// the pre-partition values safely.
#pragma omp for schedule(static)
for (i=p; i<r+1; i++){
lt[i]=0;
gt[i]=0;
local[i]=N[i];
}
// Phase 1b: classify each element against the pivot (pivot slot r is
// excluded and keeps lt[r]=gt[r]=0 until the scan).
#pragma omp for schedule(static)
for (i = p; i <r; i++){
if(N[i]<key){
lt[i]=1;
gt[i]=0;
}else{
lt[i]=0;
gt[i]=1;
}
}
}
// Turn the 0/1 flags into inclusive counts; lt[r] is then the number of
// elements strictly less than the pivot.
parallelPrefixSum(p,r);
int pivot = lt[r];
N[pivot+p]=key;
#pragma omp parallel
{
// Phase 2: scatter from the snapshot — smaller elements to p+lt[i]-1,
// the rest after the pivot at p+pivot+gt[i].
#pragma omp for schedule(static)
for(i=p; i<r; i++){
if(local[i]<key){
int index = p+lt[i]-1;
N[index]=local[i];
}else{
int index = p+pivot+gt[i];
N[index]=local[i];
}
}
}
return pivot+p;
}
// Task-recursive hybrid quicksort of N[p..r].  Tiny ranges (<= 50 elements)
// fall back to insertion sort; ranges smaller than half the original input
// use the cheap sequential partition, larger ones partition in parallel.
// The two recursive halves are spawned as OpenMP tasks; they are guaranteed
// complete by the implicit barrier at the end of the enclosing parallel
// region in parallelQuickSort().
void psqHelper(int p, int r, int size){
if(p<r){
if(r-p<=50){
insertionSortHelper(p,r);
}else{
int q;
if(r-p < 0.5*size){
q = partition(p,r);
}else{
q=parallelPartition(p,r);
}
#pragma omp task
psqHelper(p,q-1, size);
#pragma omp task
psqHelper(q+1,r, size);
}
}
}
// Sorts N[0..n-1] with the task-parallel quicksort; returns elapsed seconds.
// One thread seeds the root psqHelper task; the implicit barrier at the end
// of the parallel region waits for the whole task tree.
// NOTE(review): the two `#pragma omp master` directives sit OUTSIDE any
// parallel region — they bind to the sequential part and are effectively
// no-ops here; the timing still works because only the initial thread runs.
double parallelQuickSort(int n){
double t1, t2;
#pragma omp master
t1 = omp_get_wtime();
#pragma omp parallel
{
// nowait: the single thread's teammates go straight to stealing tasks.
#pragma omp single nowait
{
psqHelper(0, n-1, n);
}
}
#pragma omp master
t2 = omp_get_wtime();
return t2-t1;
}
/* Returns 0 if N[0..n-1] is in nondecreasing order, -1 otherwise. */
int checkArray(int n){
    for (int idx = 0; idx + 1 < n; idx++){
        if (N[idx] > N[idx + 1]){
            return -1;
        }
    }
    return 0;
}
// Benchmark harness: for a ladder of input sizes, fills N with random
// doubles, times the parallel quicksort, and verifies the result.
// NOTE(review): fp (simTimes.csv) is opened and closed but never written —
// either dead code or timings were meant to be logged there; confirm.
int main(int argc, char * argv[]){
FILE* fp = fopen("simTimes.csv","w+");
int len=15;
int n[] = {10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10000,20000,200000,2000000,20000000,75000000};
int i;
// Seed from the PID so each run sorts different data.
srand(getpid());
for(i = 0; i<len; i++){
fillArrayRandom(n[i]);
double t = parallelQuickSort(n[i]);
printf("%d elements sorted in %f time\n", n[i], t);
if(checkArray(n[i])==-1){
printf("SORT FAILED\n");
}else{
printf("SUCCESSFUL SORT\n");
}
}
fclose(fp);
}
|
MD5_fmt.c | /*
* This file is part of John the Ripper password cracker,
* Copyright (c) 1996-2001,2008,2010-2012 by Solar Designer
*
* ...with changes in the jumbo patch, by bartavelle and magnum.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* There's ABSOLUTELY NO WARRANTY, express or implied.
*/
#include <string.h>
#include "arch.h"
#include "misc.h"
#include "sse-intrinsics.h"
#include "MD5_std.h"
#include "common.h"
#include "formats.h"
#if defined(_OPENMP) && defined(MD5_SSE_PARA)
#define OMP_SCALE 4
#include <omp.h>
#endif
#include "memdbg.h"
#define FORMAT_LABEL "md5crypt"
#define FORMAT_NAME "crypt(3) $1$"
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 15
#define CIPHERTEXT_LENGTH 22
#ifdef MD5_SSE_PARA
#define BINARY_SIZE 16
#else
#define BINARY_SIZE 4
#endif
#define BINARY_ALIGN 4
#define SALT_SIZE 9
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT MD5_N
#define MAX_KEYS_PER_CRYPT MD5_N
static struct fmt_tests tests[] = {
{"$1$12345678$aIccj83HRDBo6ux1bVx7D1", "0123456789ABCDE"},
{"$apr1$Q6ZYh...$RV6ft2bZ8j.NGrxLYaJt9.", "test"},
{"$1$12345678$f8QoJuo0DpBRfQSD0vglc1", "12345678"},
{"$1$$qRPK7m23GJusamGpoGLby/", ""},
{"$apr1$a2Jqm...$grFrwEgiQleDr0zR4Jx1b.", "15 chars is max"},
{"$1$$AuJCr07mI7DSew03TmBIv/", "no salt"},
{"$1$`!@#%^&*$E6hD76/pKTS8qToBCkux30", "invalid salt"},
{"$1$12345678$xek.CpjQUVgdf/P2N9KQf/", ""},
{"$1$1234$BdIMOAWFOV2AQlLsrN/Sw.", "1234"},
{"$apr1$rBXqc...$NlXxN9myBOk95T0AyLAsJ0", "john"},
{"$apr1$Grpld/..$qp5GyjwM2dnA5Cdej9b411", "the"},
{"$apr1$GBx.D/..$yfVeeYFCIiEXInfRhBRpy/", "ripper"},
{"$1$bb$19smCEBG0Q1pVil0/HqK./", "aaaaa"},
{"$1$coin$rebm0t9KJ56mgGWJF5o5M0", "lapin"},
{"$1$pouet$/Ecz/vyk.zCYvrr6wB78h0", "canard"},
{"$1$test2$02MCIATVoxq3IhgK6XRkb1", "test1"},
{"$1$aussi$X67z3kXsWo92F15uChx1H1", "felicie"},
{"$1$boire$gf.YM2y3InYEu9.NbVr.v0", "manger"},
{"$1$bas$qvkmmWnVHRCSv/6LQ1doH/", "haut"},
{"$1$gauche$EPvd6LZlrgb0MMFPxUrJN1", "droite"},
/* following hashes are AIX non-standard smd5 hashes */
{"{smd5}s8/xSJ/v$uGam4GB8hOjTLQqvBfxJ2/", "password"},
{"{smd5}alRJaSLb$aKM3H1.h1ycXl5GEVDH1e1", "aixsucks?"},
{"{smd5}eLB0QWeS$Eg.YfWY8clZuCxF0xNrKg.", "0123456789ABCDE"},
/* following hashes are AIX standard smd5 hashes (with corrected tag)
* lpa_options = std_hash=true */
{"$1$JVDbGx8K$T9h8HK4LZxeLPMTAxCfpc1", "password"},
{"$1$1Cu6fEvv$42kuaJ5fMEqyVStPuFG040", "0123456789ABCDE"},
{"$1$ql5x.xXL$vYVDhExol2xUBBpERRWcn1", "jtr>hashcat"},
{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
#ifdef MD5_SSE_PARA
static unsigned char cursalt[SALT_SIZE];
static int CryptType;
static MD5_word (*sout);
static int omp_para = 1;
#endif
// Format initialization: sizes the key batch for the available parallelism
// and allocates the saved-key (and, for SSE builds, output) buffers.
static void init(struct fmt_main *self)
{
MD5_std_init(self);
#if defined(_OPENMP) && defined(MD5_SSE_PARA)
// Scale keys-per-crypt by thread count, then by OMP_SCALE so each thread
// gets several SIMD batches to balance load.
omp_para = omp_get_max_threads();
if (omp_para < 1)
omp_para = 1;
self->params.min_keys_per_crypt = MD5_N * omp_para;
omp_para *= OMP_SCALE;
self->params.max_keys_per_crypt = MD5_N * omp_para;
#elif MD5_std_mt
self->params.min_keys_per_crypt = MD5_std_min_kpc;
self->params.max_keys_per_crypt = MD5_std_max_kpc;
#endif
saved_key = mem_calloc_tiny(
sizeof(*saved_key) * self->params.max_keys_per_crypt,
MEM_ALIGN_CACHE);
#ifdef MD5_SSE_PARA
// sout holds one full 16-byte digest per key, MD5_word-aligned.
sout = mem_calloc_tiny(sizeof(*sout) *
self->params.max_keys_per_crypt *
BINARY_SIZE, sizeof(MD5_word));
#endif
}
// Syntax check for md5crypt hashes: "$1$", "$apr1$" or "{smd5}" tag,
// then a 0-8 character salt ended by '$', then exactly 22 base-64 digits.
static int valid(char *ciphertext, struct fmt_main *self)
{
char *pos, *start;
if (strncmp(ciphertext, "$1$", 3)) {
if (strncmp(ciphertext, "$apr1$", 6) &&
strncmp(ciphertext, "{smd5}", 6))
return 0;
// Both long tags are 6 bytes; advancing 3 makes &ciphertext[3] below
// land right after the tag, matching the "$1$" case.
ciphertext += 3;
}
// Scan the salt: up to 8 chars before the '$' separator.
for (pos = &ciphertext[3]; *pos && *pos != '$'; pos++);
if (!*pos || pos < &ciphertext[3] || pos > &ciphertext[11]) return 0;
start = ++pos;
// The hash body must be exactly CIPHERTEXT_LENGTH (22) base-64 chars.
while (atoi64[ARCH_INDEX(*pos)] != 0x7F) pos++;
if (*pos || pos - start != CIPHERTEXT_LENGTH) return 0;
// 22 chars encode 132 bits for a 128-bit digest, so the last character
// may carry only 2 payload bits: its high bits (mask 0x3C) must be clear.
if (atoi64[ARCH_INDEX(*(pos - 1))] & 0x3C) return 0;
return 1;
}
// Low 4 bits of the first digest word for key `index` (hash-table bucket).
static int get_hash_0(int index)
{
#ifdef MD5_SSE_PARA
// SSE output interleaves 4 lanes (MMX_COEF) of 4 words per block:
// lane = index&3, block = index/4.
unsigned int x,y;
x = index&3;
y = index/4;
return ((MD5_word *)sout)[x+y*MMX_COEF*4] & 0xF;
#else
init_t();
return MD5_out[index][0] & 0xF;
#endif
}
// Low 8 bits of the first digest word for key `index` (hash-table bucket).
static int get_hash_1(int index)
{
#ifdef MD5_SSE_PARA
// lane = index&3, interleaved block = index/4 in the SSE output layout.
unsigned int x,y;
x = index&3;
y = index/4;
return ((MD5_word *)sout)[x+y*MMX_COEF*4] & 0xFF;
#else
init_t();
return MD5_out[index][0] & 0xFF;
#endif
}
// Low 12 bits of the first digest word for key `index` (hash-table bucket).
static int get_hash_2(int index)
{
#ifdef MD5_SSE_PARA
// lane = index&3, interleaved block = index/4 in the SSE output layout.
unsigned int x,y;
x = index&3;
y = index/4;
return ((MD5_word *)sout)[x+y*MMX_COEF*4] & 0xFFF;
#else
init_t();
return MD5_out[index][0] & 0xFFF;
#endif
}
// Low 16 bits of the first digest word for key `index` (hash-table bucket).
static int get_hash_3(int index)
{
#ifdef MD5_SSE_PARA
// lane = index&3, interleaved block = index/4 in the SSE output layout.
unsigned int x,y;
x = index&3;
y = index/4;
return ((MD5_word *)sout)[x+y*MMX_COEF*4] & 0xFFFF;
#else
init_t();
return MD5_out[index][0] & 0xFFFF;
#endif
}
// Low 20 bits of the first digest word for key `index` (hash-table bucket).
static int get_hash_4(int index)
{
#ifdef MD5_SSE_PARA
// lane = index&3, interleaved block = index/4 in the SSE output layout.
unsigned int x,y;
x = index&3;
y = index/4;
return ((MD5_word *)sout)[x+y*MMX_COEF*4] & 0xFFFFF;
#else
init_t();
return MD5_out[index][0] & 0xFFFFF;
#endif
}
// Low 24 bits of the first digest word for key `index` (hash-table bucket).
static int get_hash_5(int index)
{
#ifdef MD5_SSE_PARA
// lane = index&3, interleaved block = index/4 in the SSE output layout.
unsigned int x,y;
x = index&3;
y = index/4;
return ((MD5_word *)sout)[x+y*MMX_COEF*4] & 0xFFFFFF;
#else
init_t();
return MD5_out[index][0] & 0xFFFFFF;
#endif
}
// Low 27 bits of the first digest word for key `index` (hash-table bucket).
static int get_hash_6(int index)
{
#ifdef MD5_SSE_PARA
// lane = index&3, interleaved block = index/4 in the SSE output layout.
unsigned int x,y;
x = index&3;
y = index/4;
return ((MD5_word *)sout)[x+y*MMX_COEF*4] & 0x7FFFFFF;
#else
init_t();
return MD5_out[index][0] & 0x7FFFFFF;
#endif
}
// Hashes the 8-byte salt into a SALT_HASH_SIZE-bucket table index by mixing
// pairs of salt characters (base-64 value of one XORed with the raw byte of
// the other) and folding the high bits back in.
static int salt_hash(void *salt)
{
unsigned int i, h, retval;
retval = 0;
for (i = 0; i <= 6; i += 2) {
h = (unsigned char)atoi64[ARCH_INDEX(((char *)salt)[i])];
h ^= ((unsigned char *)salt)[i + 1];
h <<= 6;
h ^= (unsigned char)atoi64[ARCH_INDEX(((char *)salt)[i + 1])];
h ^= ((unsigned char *)salt)[i];
retval += h;
}
// Fold the overflow bits down, then mask to the table size (power of 2).
retval ^= retval >> SALT_HASH_LOG;
retval &= SALT_HASH_SIZE - 1;
return retval;
}
// Stores candidate plaintext `key` at slot `index`.  The non-SSE path also
// feeds it into MD5_std's own key buffers; the SSE path reads saved_key
// directly in crypt_all().
static void set_key(char *key, int index)
{
#ifndef MD5_SSE_PARA
MD5_std_set_key(key, index);
#endif
strnfcpy(saved_key[index], key, PLAINTEXT_LENGTH);
}
// Returns the stored plaintext for slot `index`, ensuring NUL termination
// (strnfcpy may leave the buffer unterminated at max length).
static char *get_key(int index)
{
saved_key[index][PLAINTEXT_LENGTH] = 0;
return saved_key[index];
}
// Hashes all queued keys against the current salt.  The SSE build splits
// the batch into omp_para chunks of MD5_N keys, one md5cryptsse() call per
// OpenMP thread, writing digests into the interleaved sout buffer; other
// builds defer to MD5_std_crypt().  Returns the unchanged count.
static int crypt_all(int *pcount, struct db_salt *salt)
{
int count = *pcount;
#ifdef MD5_SSE_PARA
#ifdef _OPENMP
int t;
#pragma omp parallel for
for (t = 0; t < omp_para; t++)
md5cryptsse((unsigned char *)(&saved_key[t*MD5_N]), cursalt, (char *)(&sout[t*MD5_N*BINARY_SIZE/sizeof(MD5_word)]), CryptType);
#else
md5cryptsse((unsigned char *)saved_key, cursalt, (char *)sout, CryptType);
#endif
#else
MD5_std_crypt(count);
#endif
return count;
}
// Fast screen: returns 1 if ANY computed digest's first word matches the
// target binary's first word (full verification happens in cmp_one/exact).
static int cmp_all(void *binary, int count)
{
#ifdef MD5_SSE_PARA
// Walk every SIMD lane of every PARA block in the interleaved output.
unsigned int x,y;
for(y=0;y<MD5_SSE_PARA*omp_para;y++) for(x=0;x<MMX_COEF;x++)
{
if( ((MD5_word *)binary)[0] == ((MD5_word *)sout)[x+y*MMX_COEF*4] )
return 1;
}
return 0;
#else
#if MD5_std_mt
int t, n = (count + (MD5_N - 1)) / MD5_N;
#endif
for_each_t(n) {
#if MD5_X2
// The X2 layout computes two hashes per slot; check both.
if (*(MD5_word *)binary == MD5_out[0][0] ||
*(MD5_word *)binary == MD5_out[1][0])
return 1;
#else
if (*(MD5_word *)binary == MD5_out[0][0])
return 1;
#endif
}
return 0;
#endif
}
// Full comparison for a single candidate: SSE builds check all four digest
// words (stride 4 between words in the interleaved layout); non-SSE builds
// compare only the first word here and rely on cmp_exact for the rest.
static int cmp_one(void *binary, int index)
{
#ifdef MD5_SSE_PARA
unsigned int x,y;
x = index&3;
y = index/4;
if( ((unsigned int *)binary)[0] != ((unsigned int *)sout)[x+y*MMX_COEF*4] )
return 0;
if( ((unsigned int *)binary)[1] != ((unsigned int *)sout)[x+y*MMX_COEF*4+4] )
return 0;
if( ((unsigned int *)binary)[2] != ((unsigned int *)sout)[x+y*MMX_COEF*4+8] )
return 0;
if( ((unsigned int *)binary)[3] != ((unsigned int *)sout)[x+y*MMX_COEF*4+12] )
return 0;
return 1;
#else
init_t();
return *(MD5_word *)binary == MD5_out[index][0];
#endif
}
// Final verification against the full source hash.  The SSE path already
// compared all 16 digest bytes in cmp_one, so it trivially returns 1; the
// scalar path compares the complete binary here.
static int cmp_exact(char *source, int index)
{
#ifdef MD5_SSE_PARA
return 1;
#else
init_t();
return !memcmp(MD5_std_get_binary(source), MD5_out[index],
sizeof(MD5_binary));
#endif
}
// Installs the current salt.  The 9th salt byte carries the crypt subtype
// tag; the SSE path stashes it in CryptType and NUL-terminates the 8-byte
// salt proper before handing it to md5cryptsse().
static void set_salt(void *salt)
{
#ifdef MD5_SSE_PARA
memcpy(cursalt, salt, SALT_SIZE);
CryptType = cursalt[8];
cursalt[8] = 0;
#endif
MD5_std_set_salt(salt);
}
// Thin wrapper: extracts the salt from a ciphertext via MD5_std.
static void *get_salt(char *ciphertext) {
return MD5_std_get_salt(ciphertext);
}
// Thin wrapper: decodes the base-64 hash body into binary via MD5_std.
static void *get_binary(char *ciphertext) {
return MD5_std_get_binary(ciphertext);
}
// Registration table wiring this md5crypt format into John the Ripper:
// first the static format parameters, then the method vtable.
struct fmt_main fmt_MD5 = {
{
/* format parameters */
FORMAT_LABEL,
FORMAT_NAME,
"MD5 " MD5_ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
#if MD5_std_mt || defined(MD5_SSE_PARA)
FMT_OMP |
#endif
FMT_CASE | FMT_8_BIT,
#if FMT_MAIN_VERSION > 11
{ NULL },
#endif
tests
}, {
/* methods: defaults are used wherever this format needs no override */
init,
fmt_default_done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
#if FMT_MAIN_VERSION > 11
{ NULL },
#endif
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
salt_hash,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
|
AlgebraicTriangleCounting.h | /*
* AlgebraicTriangleCounting.h
*
* Created on: Jul 12, 2016
* Author: Michael Wegner (michael.wegner@student.kit.edu)
*/
#ifndef NETWORKIT_CPP_ALGEBRAIC_ALGORITHMS_ALGEBRAICTRIANGLECOUNTING_H_
#define NETWORKIT_CPP_ALGEBRAIC_ALGORITHMS_ALGEBRAICTRIANGLECOUNTING_H_
#include "../../base/Algorithm.h"
namespace NetworKit {
/**
* @ingroup algebraic
* Implements a triangle counting algorithm for nodes based on algebraic methods.
*/
template<class Matrix>
class AlgebraicTriangleCounting : public Algorithm {
public:
	/**
	 * Constructs the algorithm for @a graph. The adjacency matrix of the
	 * graph is materialized once and stored.
	 * @param graph
	 */
	AlgebraicTriangleCounting(const Graph& graph) : A(Matrix::adjacencyMatrix(graph)), directed(graph.isDirected()) {}

	/**
	 * Computes, for every node, the number of triangles that node belongs to.
	 * A triangle is a node set: the triangle (u,v,w) contributes exactly one
	 * to the score of each of u, v and w.
	 */
	void run() override;

	/**
	 * Returns the triangle count of node @a u. run() must have been called.
	 * @param u
	 */
	count score(node u) const {
		if (!hasRun) {
			throw std::runtime_error("AlgebraicTriangleCounting::score(node u): Call run() method first.");
		}
		assert(u < A.numberOfRows());
		return nodeScores[u];
	}

	/**
	 * Returns the triangle counts of all nodes. With @a moveOut set to true
	 * (default false) the internal vector is moved out instead of copied;
	 * afterwards the results are no longer stored, so run() must be called
	 * again before the next query.
	 * @param moveOut
	 */
	std::vector<count> getScores(bool moveOut = false) {
		if (!hasRun) {
			throw std::runtime_error("AlgebraicTriangleCounting::getScores(): Call run() method first.");
		}
		hasRun = !moveOut;
		if (moveOut) {
			return std::move(nodeScores);
		}
		return nodeScores;
	}

private:
	Matrix A;
	bool directed;
	std::vector<count> nodeScores;
};
template<class Matrix>
void AlgebraicTriangleCounting<Matrix>::run() {
	// Entry (i,i) of A^3 is the number of closed walks of length 3 at node i.
	Matrix cubed = A * A * A;
	nodeScores.assign(A.numberOfRows(), 0);
#pragma omp parallel for
	for (omp_index nodeId = 0; nodeId < static_cast<omp_index>(cubed.numberOfRows()); ++nodeId) {
		if (directed) {
			nodeScores[nodeId] = cubed(nodeId, nodeId);
		} else {
			// undirected: halve the raw count (each closed walk appears in both orientations)
			nodeScores[nodeId] = cubed(nodeId, nodeId) / 2.0;
		}
	}
	hasRun = true;
}
} /* namespace NetworKit */
#endif /* NETWORKIT_CPP_ALGEBRAIC_ALGORITHMS_ALGEBRAICTRIANGLECOUNTING_H_ */
|
GB_unop__identity_fp64_int64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fp64_int64)
// op(A') function: GB (_unop_tran__identity_fp64_int64)
// C type: double
// A type: int64_t
// cast: double cij = (double) aij
// unaryop: cij = aij
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
double z = (double) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = (double) aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the identity operator with int64_t -> double typecast.
// Auto-generated file: the loop bodies are the expansions of the GB_* macros above.
GrB_Info GB (_unop_apply__identity_fp64_int64)
(
double *Cx, // Cx and Ax may be aliased
const int64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
// no bitmap: all anz entries are present
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (int64_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int64_t aij = Ax [p] ;
double z = (double) aij ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries absent from the bitmap
if (!Ab [p]) continue ;
int64_t aij = Ax [p] ;
double z = (double) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast int64_t -> double, and apply identity.
// The actual work is performed by the generic template GB_unop_transpose.c,
// which expands the GB_* macros defined above.
GrB_Info GB (_unop_tran__identity_fp64_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
coordinator.c | /*
This source file is part of GAME-DA, which is released under the MIT license.
Github repository: https://github.com/OpenNWP/GAME-DA
*/
/*
This file coordinates the data assimilation process.
*/
#include <stdlib.h>
#include "enum.h"
#include "game-da.h"
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <netcdf.h>
#include <geos95.h>
#include <atmostracers.h>
#define NCERR(e) {printf("Error: %s\n", nc_strerror(e)); exit(2);}
#define EPSILON 1e-4
#define SCALE_HEIGHT 8000.0
#define P_0 100000
int obs_op_setup(double [], double [][NO_OF_REL_MODEL_DOFS_PER_OBS], int [][NO_OF_REL_MODEL_DOFS_PER_OBS], double [], double [], double [], double [], double [], double [], double []);
int obs_op_setup_wind(double [], double [][NO_OF_REL_MODEL_DOFS_PER_OBS], int [][NO_OF_REL_MODEL_DOFS_PER_OBS], double [], double [], double [], double [],
double [], double [], double [], double []);
int main(int argc, char *argv[])
{
	/*
	Coordinates the data assimilation: reads the grid properties, the
	background state and the observations, runs the optimum interpolation
	(oi) for the dry thermodynamic state, the moisture and the horizontal
	wind, interpolates the SST to the model grid and writes the resulting
	model state to a netCDF file.
	Command line arguments:
	argv[1..4]: year, month, day, hour strings
	argv[5]:    model home directory
	argv[6]:    orography ID (integer)
	argv[7]:    background state file
	argv[8]:    GAME-DA root directory
	*/
	// guarding against missing command line arguments (argv[1] ... argv[8] are read below)
	if (argc < 9)
	{
		printf("Error: eight command line arguments are required.\n");
		printf("Aborting.\n");
		exit(1);
	}
	// NO_OF_REL_MODEL_DOFS_PER_OBS is an integer constant, so the integer
	// remainder operator is the right tool here (fmod is for floating-point)
	if (NO_OF_REL_MODEL_DOFS_PER_OBS % 2 == 1)
	{
		printf("NO_OF_REL_MODEL_DOFS_PER_OBS must be even.\n");
		printf("Aborting.\n");
		exit(1);
	}
	// gas constant and isobaric heat capacity of dry air
	double R_D = specific_gas_constants_lookup(0);
	double C_D_P = spec_heat_capacities_p_gas_lookup(0);
	// copying the command line arguments into properly sized local buffers
	char year_string[strlen(argv[1]) + 1];
	strcpy(year_string, argv[1]);
	char month_string[strlen(argv[2]) + 1];
	strcpy(month_string, argv[2]);
	char day_string[strlen(argv[3]) + 1];
	strcpy(day_string, argv[3]);
	char hour_string[strlen(argv[4]) + 1];
	strcpy(hour_string, argv[4]);
	char model_home_dir[strlen(argv[5]) + 1];
	strcpy(model_home_dir, argv[5]);
	// the orography ID is an integer, so strtol (not strtod) is the appropriate parser
	int ORO_ID = strtol(argv[6], NULL, 10);
	char BACKGROUND_STATE_FILE[strlen(argv[7]) + 1];
	strcpy(BACKGROUND_STATE_FILE, argv[7]);
	char game_da_root_dir[strlen(argv[8]) + 1];
	strcpy(game_da_root_dir, argv[8]);
	printf("Background state file: %s\n", BACKGROUND_STATE_FILE);
	// Allocating memory for the grid properties.
	double *latitudes_model = malloc(NO_OF_SCALARS_H*sizeof(double));
	double *longitudes_model = malloc(NO_OF_SCALARS_H*sizeof(double));
	double *z_coords_model = malloc(NO_OF_SCALARS*sizeof(double));
	double *latitudes_model_wind = malloc(NO_OF_VECTORS_H*sizeof(double));
	double *longitudes_model_wind = malloc(NO_OF_VECTORS_H*sizeof(double));
	double *directions = malloc(NO_OF_VECTORS_H*sizeof(double));
	double *z_coords_model_wind = malloc(NO_OF_VECTORS*sizeof(double));
	double *gravity_potential_model = malloc(NO_OF_SCALARS*sizeof(double));
	double *normal_distance = malloc(NO_OF_VECTORS*sizeof(double));
	int *from_index = malloc(NO_OF_VECTORS_H*sizeof(int));
	int *to_index = malloc(NO_OF_VECTORS_H*sizeof(int));
	int *adjacent_vector_indices_h = malloc(6*NO_OF_SCALARS_H*sizeof(int));
	// Reading the grid properties.
	int ncid_grid, retval;
	char GEO_PROP_FILE_PRE[200];
	sprintf(GEO_PROP_FILE_PRE, "%s/grid_generator/grids/RES%d_L%d_ORO%d.nc", model_home_dir, RES_ID, NO_OF_LAYERS, ORO_ID);
	char GEO_PROP_FILE[strlen(GEO_PROP_FILE_PRE) + 1];
	strcpy(GEO_PROP_FILE, GEO_PROP_FILE_PRE);
	printf("Grid file: %s\n", GEO_PROP_FILE);
	printf("Reading grid file ...\n");
	if ((retval = nc_open(GEO_PROP_FILE, NC_NOWRITE, &ncid_grid)))
		NCERR(retval);
	int latitudes_model_id, latitudes_model_wind_id, longitudes_model_id, longitudes_model_wind_id, z_coords_model_id,
	z_coords_model_wind_id, gravity_potential_model_id, normal_distance_id, from_index_id, to_index_id, adjacent_vector_indices_h_id, directions_id;
	if ((retval = nc_inq_varid(ncid_grid, "latitude_scalar", &latitudes_model_id)))
		NCERR(retval);
	if ((retval = nc_inq_varid(ncid_grid, "longitude_scalar", &longitudes_model_id)))
		NCERR(retval);
	if ((retval = nc_inq_varid(ncid_grid, "z_scalar", &z_coords_model_id)))
		NCERR(retval);
	if ((retval = nc_inq_varid(ncid_grid, "latitude_vector", &latitudes_model_wind_id)))
		NCERR(retval);
	if ((retval = nc_inq_varid(ncid_grid, "longitude_vector", &longitudes_model_wind_id)))
		NCERR(retval);
	if ((retval = nc_inq_varid(ncid_grid, "direction", &directions_id)))
		NCERR(retval);
	if ((retval = nc_inq_varid(ncid_grid, "z_vector", &z_coords_model_wind_id)))
		NCERR(retval);
	if ((retval = nc_inq_varid(ncid_grid, "gravity_potential", &gravity_potential_model_id)))
		NCERR(retval);
	if ((retval = nc_inq_varid(ncid_grid, "normal_distance", &normal_distance_id)))
		NCERR(retval);
	if ((retval = nc_inq_varid(ncid_grid, "from_index", &from_index_id)))
		NCERR(retval);
	if ((retval = nc_inq_varid(ncid_grid, "to_index", &to_index_id)))
		NCERR(retval);
	if ((retval = nc_inq_varid(ncid_grid, "adjacent_vector_indices_h", &adjacent_vector_indices_h_id)))
		NCERR(retval);
	if ((retval = nc_get_var_int(ncid_grid, from_index_id, &from_index[0])))
		NCERR(retval);
	if ((retval = nc_get_var_int(ncid_grid, to_index_id, &to_index[0])))
		NCERR(retval);
	if ((retval = nc_get_var_int(ncid_grid, adjacent_vector_indices_h_id, &adjacent_vector_indices_h[0])))
		NCERR(retval);
	if ((retval = nc_get_var_double(ncid_grid, latitudes_model_id, &latitudes_model[0])))
		NCERR(retval);
	if ((retval = nc_get_var_double(ncid_grid, longitudes_model_id, &longitudes_model[0])))
		NCERR(retval);
	if ((retval = nc_get_var_double(ncid_grid, z_coords_model_id, &z_coords_model[0])))
		NCERR(retval);
	if ((retval = nc_get_var_double(ncid_grid, latitudes_model_wind_id, &latitudes_model_wind[0])))
		NCERR(retval);
	if ((retval = nc_get_var_double(ncid_grid, longitudes_model_wind_id, &longitudes_model_wind[0])))
		NCERR(retval);
	if ((retval = nc_get_var_double(ncid_grid, directions_id, &directions[0])))
		NCERR(retval);
	if ((retval = nc_get_var_double(ncid_grid, z_coords_model_wind_id, &z_coords_model_wind[0])))
		NCERR(retval);
	if ((retval = nc_get_var_double(ncid_grid, gravity_potential_model_id, &gravity_potential_model[0])))
		NCERR(retval);
	if ((retval = nc_get_var_double(ncid_grid, normal_distance_id, &normal_distance[0])))
		NCERR(retval);
	if ((retval = nc_close(ncid_grid)))
		NCERR(retval);
	printf("Grid file read.\n");
	// Constructing the name of the output file.
	char output_file_pre[200];
	sprintf(output_file_pre, "%s/nwp_init/%s%s%s%s.nc", model_home_dir, year_string, month_string, day_string, hour_string);
	char output_file[strlen(output_file_pre) + 1];
	strcpy(output_file, output_file_pre);
	// These are the arrays of the background state.
	double *densities_background = malloc(6*NO_OF_SCALARS*sizeof(double));
	double *temperatures_background = malloc(5*NO_OF_SCALARS*sizeof(double));
	double *wind_background = malloc(NO_OF_VECTORS*sizeof(double));
	// Reading the background state.
	printf("Reading background state ...\n");
	int ncid;
	if ((retval = nc_open(BACKGROUND_STATE_FILE, NC_NOWRITE, &ncid)))
		NCERR(retval);
	int densities_background_id, temperatures_background_id, wind_background_id;
	if ((retval = nc_inq_varid(ncid, "densities", &densities_background_id)))
		NCERR(retval);
	if ((retval = nc_inq_varid(ncid, "temperatures", &temperatures_background_id)))
		NCERR(retval);
	if ((retval = nc_inq_varid(ncid, "wind", &wind_background_id)))
		NCERR(retval);
	if ((retval = nc_get_var_double(ncid, densities_background_id, &densities_background[0])))
		NCERR(retval);
	if ((retval = nc_get_var_double(ncid, temperatures_background_id, &temperatures_background[0])))
		NCERR(retval);
	if ((retval = nc_get_var_double(ncid, wind_background_id, &wind_background[0])))
		NCERR(retval);
	if ((retval = nc_close(ncid)))
		NCERR(retval);
	printf("Background state read.\n");
	// saving the relevant part of the background state in one array
	double *background_dry = malloc(NO_OF_MODEL_DOFS_DRY*sizeof(double));
	#pragma omp parallel for
	for (int i = 0; i < NO_OF_MODEL_DOFS_DRY; ++i)
	{
		// temperature of the gas phase of the background state
		if (i < NO_OF_SCALARS)
		{
			background_dry[i] = temperatures_background[4*NO_OF_SCALARS + i];
		}
		// dry air density in the lowest layer of the background state
		else
		{
			background_dry[i] = densities_background[4*NO_OF_SCALARS + i - NO_OF_SCALARS_H];
		}
	}
	free(temperatures_background);
	// Allocating the memory for the observations.
	double *latitude_vector_obs = malloc(NO_OF_CHOSEN_OBSERVATIONS*sizeof(double));
	double *longitude_vector_obs = malloc(NO_OF_CHOSEN_OBSERVATIONS*sizeof(double));
	double *z_coords_obs = malloc(NO_OF_CHOSEN_OBSERVATIONS*sizeof(double));
	double *observations_vector = malloc(NO_OF_CHOSEN_OBSERVATIONS*sizeof(double));
	char OBSERVATIONS_FILE_PRE[200];
	sprintf(OBSERVATIONS_FILE_PRE, "%s/input/obs_%s%s%s%s.nc", game_da_root_dir, year_string, month_string, day_string, hour_string);
	char OBSERVATIONS_FILE[strlen(OBSERVATIONS_FILE_PRE) + 1];
	strcpy(OBSERVATIONS_FILE, OBSERVATIONS_FILE_PRE);
	printf("Observations file: %s\n", OBSERVATIONS_FILE);
	// Reading the observations.
	printf("Reading observations ...\n");
	if ((retval = nc_open(OBSERVATIONS_FILE, NC_NOWRITE, &ncid)))
		NCERR(retval);
	int latitude_obs_id, longitude_obs_id, z_coords_id, observations_id;
	// Defining the variables.
	if ((retval = nc_inq_varid(ncid, "latitude_vector", &latitude_obs_id)))
		NCERR(retval);
	if ((retval = nc_inq_varid(ncid, "longitude_vector", &longitude_obs_id)))
		NCERR(retval);
	if ((retval = nc_inq_varid(ncid, "z_coords_obs", &z_coords_id)))
		NCERR(retval);
	if ((retval = nc_inq_varid(ncid, "observations_vector", &observations_id)))
		NCERR(retval);
	if ((retval = nc_get_var_double(ncid, latitude_obs_id, &latitude_vector_obs[0])))
		NCERR(retval);
	if ((retval = nc_get_var_double(ncid, longitude_obs_id, &longitude_vector_obs[0])))
		NCERR(retval);
	if ((retval = nc_get_var_double(ncid, z_coords_id, &z_coords_obs[0])))
		NCERR(retval);
	if ((retval = nc_get_var_double(ncid, observations_id, &observations_vector[0])))
		NCERR(retval);
	if ((retval = nc_close(ncid)))
		NCERR(retval);
	printf("Observations read.\n");
	// Begin of the actual assimilation.
	/*
	DRY THERMODYNAMIC STATE ASSIMILATION
	------------------------------------
	*/
	printf("Starting the dry assimilation ...\n");
	// setting up the observations operator
	double *interpolated_model_dry = malloc(NO_OF_CHOSEN_OBSERVATIONS_DRY*sizeof(double));
	double (*obs_op_jacobian_reduced_matrix_dry)[NO_OF_REL_MODEL_DOFS_PER_OBS] = malloc(sizeof(double[NO_OF_CHOSEN_OBSERVATIONS_DRY][NO_OF_REL_MODEL_DOFS_PER_OBS]));
	int (*relevant_model_dofs_matrix_dry)[NO_OF_REL_MODEL_DOFS_PER_OBS] = malloc(sizeof(int[NO_OF_CHOSEN_OBSERVATIONS_DRY][NO_OF_REL_MODEL_DOFS_PER_OBS]));
	// setting up the observations operator
	obs_op_setup(interpolated_model_dry, obs_op_jacobian_reduced_matrix_dry, relevant_model_dofs_matrix_dry,
	latitude_vector_obs, longitude_vector_obs, z_coords_obs, latitudes_model, longitudes_model, z_coords_model, background_dry);
	free(z_coords_model);
	// now, all the constituents of the gain matrix are known
	double *observations_vector_dry = malloc(NO_OF_CHOSEN_OBSERVATIONS_DRY*sizeof(double));
	// setting up the dry observations vector
	// NOTE(review): the layout of observations_vector is assumed to be
	// [temperature obs, moist obs, surface pressure obs, wind obs, SST] -- confirm against the obs file writer
	#pragma omp parallel for
	for (int i = 0; i < NO_OF_CHOSEN_OBSERVATIONS_DRY; ++i)
	{
		if (i < NO_OF_CHOSEN_OBSERVATIONS_DRY - NO_OF_CHOSEN_POINTS_PER_LAYER_OBS)
		{
			observations_vector_dry[i] = observations_vector[i];
		}
		else
		{
			observations_vector_dry[i] = observations_vector[NO_OF_CHOSEN_OBSERVATIONS_MOIST + i];
		}
	}
	// setting up the measurement error covariance matrix (diagonal only)
	double *obs_error_cov_dry = malloc(sizeof(double[NO_OF_CHOSEN_OBSERVATIONS_DRY]));
	double temperature_error_obs = 0.25;
	double pressure_error_obs = 100;
	#pragma omp parallel for
	for (int i = 0; i < NO_OF_CHOSEN_OBSERVATIONS_DRY; ++i)
	{
		if (i < NO_OF_CHOSEN_OBSERVATIONS_DRY - NO_OF_CHOSEN_POINTS_PER_LAYER_OBS)
		{
			obs_error_cov_dry[i] = pow(temperature_error_obs, 2);
		}
		else
		{
			obs_error_cov_dry[i] = pow(pressure_error_obs, 2);
		}
	}
	// setting up the background error covariance matrix (only the diagonal and the direct neighbours)
	double (*bg_error_cov_dry)[7] = calloc(1, sizeof(double[NO_OF_MODEL_DOFS_DRY][7]));
	double temperature_error_model = 1;
	double pressure_error_model = 400;
	// e-folding length of the horizontal error correlation
	double e_folding_length = 750e3;
	double distance;
	int no_of_edges, layer_index, h_index;
	#pragma omp parallel for private(distance, layer_index, h_index, no_of_edges)
	for (int i = 0; i < NO_OF_MODEL_DOFS_DRY; ++i)
	{
		layer_index = i/NO_OF_SCALARS_H;
		h_index = i - layer_index*NO_OF_SCALARS_H;
		if (layer_index == NO_OF_LAYERS)
		{
			layer_index = NO_OF_LAYERS - 1;
		}
		// pentagonal cells have only five neighbours
		no_of_edges = 6;
		if (h_index < NO_OF_PENTAGONS)
		{
			no_of_edges = 5;
		}
		// diagonal terms
		if (i < NO_OF_SCALARS)
		{
			bg_error_cov_dry[i][0] = pow(temperature_error_model, 2);
		}
		else
		{
			// density = p/(R_D*T) (Gauss'ian error propagation)
			bg_error_cov_dry[i][0] = pow(pressure_error_model/(R_D*background_dry[i - NO_OF_SCALARS_H]), 2) + pow(background_dry[i]/background_dry[i - NO_OF_SCALARS_H]*temperature_error_model, 2);
		}
		// non-diagonal terms
		for (int j = 1; j < no_of_edges + 1; ++j)
		{
			distance = normal_distance[NO_OF_SCALARS_H + layer_index*NO_OF_VECTORS_PER_LAYER + adjacent_vector_indices_h[6*h_index + j - 1]];
			bg_error_cov_dry[i][j] = bg_error_cov_dry[i][0]*exp(-distance/e_folding_length);
		}
	}
	/*
	Calling the optimum interpolation
	---------------------------------
	*/
	// NOTE(review): NO_OF_SCALARS + NO_OF_SCALARS_H is presumably equal to NO_OF_MODEL_DOFS_DRY -- confirm
	double *model_vector_dry = malloc((NO_OF_SCALARS + NO_OF_SCALARS_H)*sizeof(double));
	oi(obs_error_cov_dry, obs_op_jacobian_reduced_matrix_dry, relevant_model_dofs_matrix_dry, bg_error_cov_dry, interpolated_model_dry,
	background_dry, observations_vector_dry, model_vector_dry, NO_OF_CHOSEN_OBSERVATIONS_DRY, NO_OF_MODEL_DOFS_DRY);
	// freeing the memory
	free(obs_error_cov_dry);
	free(bg_error_cov_dry);
	free(interpolated_model_dry);
	free(background_dry);
	free(observations_vector_dry);
	// data assimilation is finished at this point
	// These are the arrays for the result of the assimilation process.
	double *density_dry = malloc(NO_OF_SCALARS*sizeof(double));
	double *wind = malloc(NO_OF_VECTORS*sizeof(double));
	double *exner = malloc(NO_OF_SCALARS*sizeof(double));
	// density is determined out of the hydrostatic equation
	// (integration from the lowest layer upward, hence the backward loop)
	double b, c;
	for (int i = NO_OF_SCALARS - 1; i >= 0; --i)
	{
		layer_index = i/NO_OF_SCALARS_H;
		h_index = i - layer_index*NO_OF_SCALARS_H;
		// at the lowest layer the density is part of the model_vector_dry
		if (layer_index == NO_OF_LAYERS - 1)
		{
			density_dry[i] = model_vector_dry[NO_OF_SCALARS + h_index];
			exner[i] = pow((density_dry[i]*R_D*model_vector_dry[i])/P_0, R_D/C_D_P);
		}
		else
		{
			// solving a quadratic equation for the Exner pressure
			b = -0.5*exner[i + NO_OF_SCALARS_H]/model_vector_dry[i + NO_OF_SCALARS_H]
			*(model_vector_dry[i] - model_vector_dry[i + NO_OF_SCALARS_H]
			+ 2/C_D_P*(gravity_potential_model[i] - gravity_potential_model[i + NO_OF_SCALARS_H]));
			c = pow(exner[i + NO_OF_SCALARS_H], 2)*model_vector_dry[i]/model_vector_dry[i + NO_OF_SCALARS_H];
			exner[i] = b + pow((pow(b, 2) + c), 0.5);
			density_dry[i] = P_0*pow(exner[i], C_D_P/R_D)/(R_D*model_vector_dry[i]);
		}
	}
	free(gravity_potential_model);
	free(exner);
	// end of the assimilation of the dry thermodynamic state
	printf("Dry assimilation completed.\n");
	/*
	MOISTURE ASSIMILATION
	---------------------
	*/
	printf("Starting the moist assimilation ...\n");
	// setting up the background error covariance matrix (only the diagonal and the direct neighbours)
	double (*bg_error_cov_moist)[7] = calloc(1, sizeof(double[NO_OF_MODEL_DOFS_MOIST][7]));
	double spec_hum_error_model = 0.01;
	#pragma omp parallel for private(distance, layer_index, h_index, no_of_edges)
	for (int i = 0; i < NO_OF_MODEL_DOFS_MOIST; ++i)
	{
		// diagonal terms
		bg_error_cov_moist[i][0] = pow(spec_hum_error_model, 2);
		layer_index = i/NO_OF_SCALARS_H;
		h_index = i - layer_index*NO_OF_SCALARS_H;
		if (layer_index == NO_OF_LAYERS)
		{
			layer_index = NO_OF_LAYERS - 1;
		}
		no_of_edges = 6;
		if (h_index < NO_OF_PENTAGONS)
		{
			no_of_edges = 5;
		}
		// non-diagonal terms
		for (int j = 1; j < no_of_edges + 1; ++j)
		{
			distance = normal_distance[NO_OF_SCALARS_H + layer_index*NO_OF_VECTORS_PER_LAYER + adjacent_vector_indices_h[6*h_index + j - 1]];
			bg_error_cov_moist[i][j] = bg_error_cov_moist[i][0]*exp(-distance/e_folding_length);
		}
	}
	free(normal_distance);
	free(from_index);
	free(to_index);
	free(adjacent_vector_indices_h);
	// writing the background state into a single vector
	double *background_moist = malloc(NO_OF_MODEL_DOFS_MOIST*sizeof(double));
	// the data assimilation is being calculated with the specific humidity for pragmatic reasons
	#pragma omp parallel for
	for (int i = 0; i < NO_OF_MODEL_DOFS_MOIST; ++i)
	{
		background_moist[i] = densities_background[5*NO_OF_SCALARS + i]/(densities_background[4*NO_OF_SCALARS + i] + densities_background[5*NO_OF_SCALARS + i]);
	}
	// setting up the observations operator
	double *interpolated_model_moist = malloc(NO_OF_CHOSEN_OBSERVATIONS_MOIST*sizeof(double));
	double (*obs_op_jacobian_reduced_matrix_moist)[NO_OF_REL_MODEL_DOFS_PER_OBS] = malloc(sizeof(double[NO_OF_CHOSEN_OBSERVATIONS_MOIST][NO_OF_REL_MODEL_DOFS_PER_OBS]));
	int (*relevant_model_dofs_matrix_moist)[NO_OF_REL_MODEL_DOFS_PER_OBS] = malloc(sizeof(int[NO_OF_CHOSEN_OBSERVATIONS_MOIST][NO_OF_REL_MODEL_DOFS_PER_OBS]));
	// setting up the moist observations operator using the dry observations operator
	for (int i = 0; i < NO_OF_CHOSEN_OBSERVATIONS_MOIST; ++i)
	{
		interpolated_model_moist[i] = 0;
		for (int j = 0; j < NO_OF_REL_MODEL_DOFS_PER_OBS; ++j)
		{
			obs_op_jacobian_reduced_matrix_moist[i][j] = obs_op_jacobian_reduced_matrix_dry[i][j];
			relevant_model_dofs_matrix_moist[i][j] = relevant_model_dofs_matrix_dry[i][j];
			interpolated_model_moist[i] += obs_op_jacobian_reduced_matrix_moist[i][j]*background_moist[relevant_model_dofs_matrix_moist[i][j]];
		}
	}
	free(obs_op_jacobian_reduced_matrix_dry);
	free(relevant_model_dofs_matrix_dry);
	// writing the moist observations into a single vector
	double *observations_vector_moist = malloc(NO_OF_CHOSEN_OBSERVATIONS_MOIST*sizeof(double));
	#pragma omp parallel for
	for (int i = 0; i < NO_OF_CHOSEN_OBSERVATIONS_MOIST; ++i)
	{
		observations_vector_moist[i] = observations_vector[NO_OF_CHOSEN_OBSERVATIONS_MOIST + i];
	}
	// setting up the measurement error covariance matrix (diagonal only)
	double *obs_error_cov_moist = malloc(sizeof(double[NO_OF_CHOSEN_OBSERVATIONS_MOIST]));
	double spec_hum_error_obs = 0.0025;
	#pragma omp parallel for
	for (int i = 0; i < NO_OF_CHOSEN_OBSERVATIONS_MOIST; ++i)
	{
		obs_error_cov_moist[i] = pow(spec_hum_error_obs, 2);
	}
	/*
	Calling the optimum interpolation
	---------------------------------
	*/
	double *model_vector_moist = malloc(NO_OF_MODEL_DOFS_MOIST*sizeof(double));
	oi(obs_error_cov_moist, obs_op_jacobian_reduced_matrix_moist, relevant_model_dofs_matrix_moist, bg_error_cov_moist, interpolated_model_moist,
	background_moist, observations_vector_moist, model_vector_moist, NO_OF_CHOSEN_OBSERVATIONS_MOIST, NO_OF_MODEL_DOFS_MOIST);
	// freeing arrays we do not need anymore
	free(obs_op_jacobian_reduced_matrix_moist);
	free(background_moist);
	free(relevant_model_dofs_matrix_moist);
	free(obs_error_cov_moist);
	free(bg_error_cov_moist);
	free(interpolated_model_moist);
	free(observations_vector_moist);
	printf("Moist assimilation completed.\n");
	/*
	WIND ASSIMILATION
	-----------------
	*/
	printf("Starting the wind assimilation ...\n");
	// writing the background state into a single vector
	double *background_wind = malloc(NO_OF_H_VECTORS*sizeof(double));
	#pragma omp parallel for private(layer_index, h_index)
	for (int i = 0; i < NO_OF_H_VECTORS; ++i)
	{
		layer_index = i/NO_OF_VECTORS_H;
		h_index = i - layer_index*NO_OF_VECTORS_H;
		background_wind[i] = wind_background[NO_OF_SCALARS_H + layer_index*NO_OF_VECTORS_PER_LAYER + h_index];
	}
	double *interpolated_model_wind = malloc(NO_OF_CHOSEN_OBSERVATIONS_WIND*sizeof(double));
	double (*obs_op_jacobian_reduced_matrix_wind)[NO_OF_REL_MODEL_DOFS_PER_OBS] = malloc(sizeof(double[NO_OF_CHOSEN_OBSERVATIONS_WIND][NO_OF_REL_MODEL_DOFS_PER_OBS]));
	int (*relevant_model_dofs_matrix_wind)[NO_OF_REL_MODEL_DOFS_PER_OBS] = malloc(sizeof(int[NO_OF_CHOSEN_OBSERVATIONS_WIND][NO_OF_REL_MODEL_DOFS_PER_OBS]));
	// setting up the observations operator
	obs_op_setup_wind(interpolated_model_wind, obs_op_jacobian_reduced_matrix_wind, relevant_model_dofs_matrix_wind,
	latitude_vector_obs, longitude_vector_obs, z_coords_obs, latitudes_model_wind, longitudes_model_wind, z_coords_model_wind, directions, background_wind);
	free(z_coords_model_wind);
	free(directions);
	free(z_coords_obs);
	free(latitudes_model_wind);
	free(longitudes_model_wind);
	// writing the observations into one vector
	double *observations_vector_wind = malloc(NO_OF_CHOSEN_OBSERVATIONS_WIND*sizeof(double));
	#pragma omp parallel for
	for (int i = 0; i < NO_OF_CHOSEN_OBSERVATIONS_WIND; ++i)
	{
		observations_vector_wind[i] = observations_vector[NO_OF_CHOSEN_OBSERVATIONS_DRY + NO_OF_CHOSEN_OBSERVATIONS_MOIST + i];
	}
	// setting the wind observations error covariance matrix (diagonal only)
	double *obs_error_cov_wind = malloc(sizeof(double[NO_OF_CHOSEN_OBSERVATIONS_WIND]));
	double wind_error_obs = 0.5;
	#pragma omp parallel for
	for (int i = 0; i < NO_OF_CHOSEN_OBSERVATIONS_WIND; ++i)
	{
		obs_error_cov_wind[i] = pow(wind_error_obs, 2);
	}
	// setting the background error covariance matrix
	double (*bg_error_cov_wind)[7] = calloc(1, sizeof(double[NO_OF_H_VECTORS][7]));
	double wind_error_model = 0.5;
	// no_of_edges and distance are function-scope variables written inside this
	// parallel loop, so they must be private to avoid a data race
	#pragma omp parallel for private(no_of_edges, distance)
	for (int i = 0; i < NO_OF_H_VECTORS; ++i)
	{
		// diagonal terms
		bg_error_cov_wind[i][0] = pow(wind_error_model, 2);
		// non-diagonal terms
		no_of_edges = 6;
		for (int j = 1; j < no_of_edges + 1; ++j)
		{
			distance = 240e3;
			bg_error_cov_wind[i][j] = bg_error_cov_wind[i][0]*exp(-distance/e_folding_length);
		}
	}
	/*
	Calling the optimum interpolation
	---------------------------------
	*/
	double *model_vector_wind = malloc(NO_OF_H_VECTORS*sizeof(double));
	oi(obs_error_cov_wind, obs_op_jacobian_reduced_matrix_wind, relevant_model_dofs_matrix_wind, bg_error_cov_wind, interpolated_model_wind,
	background_wind, observations_vector_wind, model_vector_wind, NO_OF_CHOSEN_OBSERVATIONS_WIND, NO_OF_H_VECTORS);
	// freeing the memory
	free(obs_op_jacobian_reduced_matrix_wind);
	free(obs_error_cov_wind);
	free(bg_error_cov_wind);
	free(interpolated_model_wind);
	free(background_wind);
	free(observations_vector_wind);
	printf("Wind assimilation completed.\n");
	/*
	INTERPOLATION OF THE SST
	------------------------
	*/
	printf("Interpolating the SST to the model grid ...\n");
	double *sst = malloc(NO_OF_SCALARS_H*sizeof(double));
	int min_index;
	// nearest-neighbour interpolation of the SST observations
	#pragma omp parallel for private(min_index)
	for (int i = 0; i < NO_OF_SCALARS_H; ++i)
	{
		double *distance_vector = malloc(NO_OF_SST_POINTS*sizeof(double));
		for (int j = 0; j < NO_OF_SST_POINTS; ++j)
		{
			distance_vector[j] = calculate_distance_h(latitude_vector_obs[NO_OF_CHOSEN_OBSERVATIONS - NO_OF_SST_POINTS + j],
			longitude_vector_obs[NO_OF_CHOSEN_OBSERVATIONS - NO_OF_SST_POINTS + j], latitudes_model[i], longitudes_model[i], 1);
		}
		min_index = find_min_index(distance_vector, NO_OF_SST_POINTS);
		sst[i] = observations_vector[NO_OF_CHOSEN_OBSERVATIONS - NO_OF_SST_POINTS + min_index];
		free(distance_vector);
	}
	free(latitudes_model);
	free(longitudes_model);
	free(latitude_vector_obs);
	free(longitude_vector_obs);
	free(observations_vector);
	printf("Interpolation of the SST completed.\n");
	/*
	PREPARING THE OUTPUT
	--------------------
	*/
	// individual condensate temperatures are for higher resolutions, not yet implemented
	// clouds and precipitation are set equal to the background state
	double *densities = malloc(6*NO_OF_SCALARS*sizeof(double));
	double *temperatures = malloc(5*NO_OF_SCALARS*sizeof(double));
	#pragma omp parallel for
	for (int i = 0; i < NO_OF_SCALARS; ++i)
	{
		// setting the mass densities of the result
		// condensate densities are not assimilated
		densities[i] = densities_background[i];
		densities[NO_OF_SCALARS + i] = densities_background[NO_OF_SCALARS + i];
		densities[2*NO_OF_SCALARS + i] = densities_background[2*NO_OF_SCALARS + i];
		densities[3*NO_OF_SCALARS + i] = densities_background[3*NO_OF_SCALARS + i];
		densities[4*NO_OF_SCALARS + i] = density_dry[i];
		// converting the assimilated specific humidity back to a water vapour density
		densities[5*NO_OF_SCALARS + i] = model_vector_moist[i]/(1 - model_vector_moist[i])*density_dry[i];
		// clipping negative water vapour densities
		if (densities[5*NO_OF_SCALARS + i] < 0)
		{
			densities[5*NO_OF_SCALARS + i] = 0;
		}
		// setting the temperatures of the result
		// assuming an LTE (local thermodynamic equilibrium)
		temperatures[i] = model_vector_dry[i];
		temperatures[NO_OF_SCALARS + i] = model_vector_dry[i];
		temperatures[2*NO_OF_SCALARS + i] = model_vector_dry[i];
		temperatures[3*NO_OF_SCALARS + i] = model_vector_dry[i];
		temperatures[4*NO_OF_SCALARS + i] = model_vector_dry[i];
	}
	free(model_vector_dry);
	free(density_dry);
	free(model_vector_moist);
	free(densities_background);
	// writing the result of the wind data assimilation to the resulting wind field
	#pragma omp parallel for private(layer_index, h_index)
	for (int i = 0; i < NO_OF_VECTORS; ++i)
	{
		layer_index = i/NO_OF_VECTORS_PER_LAYER;
		h_index = i - layer_index*NO_OF_VECTORS_PER_LAYER;
		// vertical winds are not assimilated and taken from the background state
		if (h_index < NO_OF_SCALARS_H)
		{
			wind[i] = wind_background[i];
		}
		else
		{
			wind[i] = model_vector_wind[layer_index*NO_OF_VECTORS_H + h_index - NO_OF_SCALARS_H];
		}
	}
	free(model_vector_wind);
	free(wind_background);
	/*
	writing the result to a netcdf file
	-----------------------------------
	*/
	printf("Output file: %s\n", output_file);
	printf("Writing result to output file ...\n");
	int densities_dimid, temperatures_dimid, vector_dimid, scalar_h_dimid, single_double_dimid, densities_id, temperatures_id, wind_id, sst_id;
	if ((retval = nc_create(output_file, NC_CLOBBER, &ncid)))
		NCERR(retval);
	if ((retval = nc_def_dim(ncid, "densities_index", 6*NO_OF_SCALARS, &densities_dimid)))
		NCERR(retval);
	if ((retval = nc_def_dim(ncid, "temperatures_index", 5*NO_OF_SCALARS, &temperatures_dimid)))
		NCERR(retval);
	if ((retval = nc_def_dim(ncid, "vector_index", NO_OF_VECTORS, &vector_dimid)))
		NCERR(retval);
	if ((retval = nc_def_dim(ncid, "scalar_h_index", NO_OF_SCALARS_H, &scalar_h_dimid)))
		NCERR(retval);
	if ((retval = nc_def_dim(ncid, "single_double_dimid_index", 1, &single_double_dimid)))
		NCERR(retval);
	if ((retval = nc_def_var(ncid, "densities", NC_DOUBLE, 1, &densities_dimid, &densities_id)))
		NCERR(retval);
	if ((retval = nc_put_att_text(ncid, densities_id, "units", strlen("kg/m^3"), "kg/m^3")))
		NCERR(retval);
	if ((retval = nc_def_var(ncid, "temperatures", NC_DOUBLE, 1, &temperatures_dimid, &temperatures_id)))
		NCERR(retval);
	if ((retval = nc_put_att_text(ncid, temperatures_id, "units", strlen("K"), "K")))
		NCERR(retval);
	if ((retval = nc_def_var(ncid, "wind", NC_DOUBLE, 1, &vector_dimid, &wind_id)))
		NCERR(retval);
	if ((retval = nc_put_att_text(ncid, wind_id, "units", strlen("m/s"), "m/s")))
		NCERR(retval);
	if ((retval = nc_def_var(ncid, "sst", NC_DOUBLE, 1, &scalar_h_dimid, &sst_id)))
		NCERR(retval);
	if ((retval = nc_put_att_text(ncid, sst_id, "units", strlen("K"), "K")))
		NCERR(retval);
	if ((retval = nc_enddef(ncid)))
		NCERR(retval);
	if ((retval = nc_put_var_double(ncid, densities_id, &densities[0])))
		NCERR(retval);
	if ((retval = nc_put_var_double(ncid, temperatures_id, &temperatures[0])))
		NCERR(retval);
	if ((retval = nc_put_var_double(ncid, wind_id, &wind[0])))
		NCERR(retval);
	if ((retval = nc_put_var_double(ncid, sst_id, &sst[0])))
		NCERR(retval);
	if ((retval = nc_close(ncid)))
		NCERR(retval);
	printf("Result successfully written.\n");
	// freeing the still occupied memory
	free(densities);
	free(temperatures);
	free(wind);
	free(sst);
	// that's it
	return 0;
}
int obs_op_setup(double interpolated_model[], double obs_op_jacobian_reduced_matrix[][NO_OF_REL_MODEL_DOFS_PER_OBS], int relevant_model_dofs_matrix[][NO_OF_REL_MODEL_DOFS_PER_OBS], double lat_used_obs[], double lon_used_obs[], double z_used_obs[], double lat_model[], double lon_model[], double z_model[], double background[])
{
/*
This function calculates the observations operator for the dry observations.
Outputs:
- interpolated_model: the background state, interpolated to the observation locations
- obs_op_jacobian_reduced_matrix: the derivative (Jacobian) of the observations operator, reduced to the relevant model
  degrees of freedom; it will be used to calculate the perturbation induced by the observations
- relevant_model_dofs_matrix: for each observation, the indices into the background state vector it depends on
Inputs: latitudes/longitudes/heights of the used observations and of the model gridpoints, plus the background state vector.
Returns 0 to indicate success.
*/
// specific gas constant of dry air (index 0); needed for the surface pressure rows of the operator
double R_D = specific_gas_constants_lookup(0);
// finding the NO_OF_REL_MODEL_DOFS_PER_OBS/2 closest grid points (horizontally) for each observation
// the vector containing the relevant horizontal model indices for each observation
int (*rel_h_index_vector)[NO_OF_REL_MODEL_DOFS_PER_OBS/2] = malloc(sizeof(int[NO_OF_CHOSEN_POINTS_PER_LAYER_OBS][NO_OF_REL_MODEL_DOFS_PER_OBS/2]));
#pragma omp parallel for
for (int i = 0; i < NO_OF_CHOSEN_POINTS_PER_LAYER_OBS; ++i)
{
// the vector containing the horizontal distances between the observation at hand and each horizontal model gridpoint
double *dist_vector = malloc(NO_OF_SCALARS_H*sizeof(double));
// filling up the dist_vector (unit radius: only relative distances matter here)
for (int j = 0; j < NO_OF_SCALARS_H; ++j)
{
dist_vector[j] = calculate_distance_h(lat_used_obs[i], lon_used_obs[i], lat_model[j], lon_model[j], 1);
}
// finding the NO_OF_REL_MODEL_DOFS_PER_OBS/2 closest points by repeated minimum search;
// each found entry is overwritten with M_PI + EPSILON (larger than any great-circle distance on the unit sphere)
// so that it cannot be found a second time
for (int j = 0; j < NO_OF_REL_MODEL_DOFS_PER_OBS/2; ++j)
{
rel_h_index_vector[i][j] = find_min_index(dist_vector, NO_OF_SCALARS_H);
dist_vector[rel_h_index_vector[i][j]] = M_PI + EPSILON;
}
free(dist_vector);
}
int layer_index, obs_index_h;
// finally setting up the reduced observations operator
#pragma omp parallel for private(layer_index, obs_index_h)
for (int obs_index = 0; obs_index < NO_OF_CHOSEN_OBSERVATIONS_DRY; ++obs_index)
{
// decomposing the flat observation index into a layer index and a horizontal index
layer_index = obs_index/NO_OF_CHOSEN_POINTS_PER_LAYER_OBS;
obs_index_h = obs_index - layer_index*NO_OF_CHOSEN_POINTS_PER_LAYER_OBS;
// the vector containing the vertical distance between the observation at hand and the model gridpoints
double vert_distance_vector[NO_OF_LAYERS];
// the vector containing preliminary interpolation weights
double weights_vector[NO_OF_REL_MODEL_DOFS_PER_OBS];
// the closest vertical indices
int closest_vert_index, other_vert_index;
double sum_of_interpol_weights, distance, closest_vert_weight, other_vert_weight;
// free atmosphere quantities (temperature); the last NO_OF_CHOSEN_POINTS_PER_LAYER_OBS observations are surface pressure
if (obs_index < NO_OF_CHOSEN_OBSERVATIONS_DRY - NO_OF_CHOSEN_POINTS_PER_LAYER_OBS)
{
sum_of_interpol_weights = 0;
interpolated_model[obs_index] = 0;
// loop over all relevant horizontal model gridpoints
for (int j = 0; j < NO_OF_REL_MODEL_DOFS_PER_OBS/2; ++j)
{
// finding out which layer is the closest to the observation
for (int k = 0; k < NO_OF_LAYERS; ++k)
{
vert_distance_vector[k] = fabs(z_model[k*NO_OF_SCALARS_H + rel_h_index_vector[obs_index_h][j]] - z_used_obs[obs_index]);
}
closest_vert_index = find_min_index(vert_distance_vector, NO_OF_LAYERS);
// vertical interpolation
// firstly setting the other vertical index to the layer below
other_vert_index = closest_vert_index + 1;
// if the closest model point is below the observation, the next higher point is taken into account for the interpolation
if (z_model[closest_vert_index*NO_OF_SCALARS_H + rel_h_index_vector[obs_index_h][j]] < z_used_obs[obs_index])
{
other_vert_index = closest_vert_index - 1;
}
// if the observation is below the lowest layer of the model: extrapolate from the two lowest layers
if (other_vert_index == NO_OF_LAYERS)
{
other_vert_index = NO_OF_LAYERS - 2;
// linear extrapolation weight (can exceed 1 below the lowest layer)
closest_vert_weight = 1 - (z_used_obs[obs_index] - z_model[closest_vert_index*NO_OF_SCALARS_H + rel_h_index_vector[obs_index_h][j]])
/(z_model[other_vert_index*NO_OF_SCALARS_H + rel_h_index_vector[obs_index_h][j]] - z_model[closest_vert_index*NO_OF_SCALARS_H + rel_h_index_vector[obs_index_h][j]]);
}
else
{
// standard linear interpolation weight between the two bracketing layers
closest_vert_weight = fabs(z_model[other_vert_index*NO_OF_SCALARS_H + rel_h_index_vector[obs_index_h][j]] - z_used_obs[obs_index])
/fabs(z_model[closest_vert_index*NO_OF_SCALARS_H + rel_h_index_vector[obs_index_h][j]] - z_model[other_vert_index*NO_OF_SCALARS_H + rel_h_index_vector[obs_index_h][j]]);
}
other_vert_weight = 1 - closest_vert_weight;
// now we know which gridpoint is relevant to this observation
// the closest vertical point
relevant_model_dofs_matrix[obs_index][j] = closest_vert_index*NO_OF_SCALARS_H + rel_h_index_vector[obs_index_h][j];
// the second closest vertical point
relevant_model_dofs_matrix[obs_index][j + NO_OF_REL_MODEL_DOFS_PER_OBS/2] = other_vert_index*NO_OF_SCALARS_H + rel_h_index_vector[obs_index_h][j];
// radius does not matter here
distance = calculate_distance_h(lat_used_obs[obs_index], lon_used_obs[obs_index], lat_model[rel_h_index_vector[obs_index_h][j]], lon_model[rel_h_index_vector[obs_index_h][j]], 1);
// 1/r-interpolation (EPSILON guards against division by zero when the observation coincides with a gridpoint)
weights_vector[j] = closest_vert_weight/pow(distance + EPSILON, INTERPOL_EXP);
weights_vector[j + NO_OF_REL_MODEL_DOFS_PER_OBS/2] = other_vert_weight/pow(distance + EPSILON, INTERPOL_EXP);
interpolated_model[obs_index] += weights_vector[j]*background[relevant_model_dofs_matrix[obs_index][j]];
interpolated_model[obs_index] += weights_vector[j + NO_OF_REL_MODEL_DOFS_PER_OBS/2]*background[relevant_model_dofs_matrix[obs_index][j + NO_OF_REL_MODEL_DOFS_PER_OBS/2]];
sum_of_interpol_weights += weights_vector[j];
sum_of_interpol_weights += weights_vector[j + NO_OF_REL_MODEL_DOFS_PER_OBS/2];
}
for (int k = 0; k < NO_OF_REL_MODEL_DOFS_PER_OBS; ++k)
{
// we have to divide by the sum of weights here (normalization of the interpolation)
obs_op_jacobian_reduced_matrix[obs_index][k] = weights_vector[k]/sum_of_interpol_weights;
}
interpolated_model[obs_index] = interpolated_model[obs_index]/sum_of_interpol_weights;
}
// surface quantities (only surface pressure for now)
else
{
sum_of_interpol_weights = 0;
interpolated_model[obs_index] = 0;
// loop over all relevant model degrees of freedom
// (first half: temperature dofs; second half: density dofs — both in the lowest layer)
for (int j = 0; j < NO_OF_REL_MODEL_DOFS_PER_OBS; ++j)
{
// we pick the lowest layer here (independent of whether we look at the temperature or the density)
closest_vert_index = NO_OF_LAYERS - 1;
// How is the surface pressure affected by the temperature in the lowest layer?
if (j < NO_OF_REL_MODEL_DOFS_PER_OBS/2)
{
// radius does not matter here
distance = calculate_distance_h(lat_used_obs[obs_index], lon_used_obs[obs_index],
lat_model[rel_h_index_vector[obs_index_h][j]], lon_model[rel_h_index_vector[obs_index_h][j]], 1);
// now we know which gridpoint is relevant to this observation
relevant_model_dofs_matrix[obs_index][j] = closest_vert_index*NO_OF_SCALARS_H + rel_h_index_vector[obs_index_h][j];
// 1/r-interpolation weight times dp_sfc/dT = R_D*rho (barometric height formula);
// NOTE(review): the "+ NO_OF_SCALARS_H" offset presumably addresses the density field relative to the
// temperature dof in the background vector layout — confirm against the state-vector layout
weights_vector[j] = 1/pow(distance + EPSILON, INTERPOL_EXP)
*R_D*background[relevant_model_dofs_matrix[obs_index][j] + NO_OF_SCALARS_H]
*exp(-(z_used_obs[NO_OF_CHOSEN_OBSERVATIONS_MOIST + obs_index] - z_model[(NO_OF_LAYERS - 1)*NO_OF_SCALARS_H + rel_h_index_vector[obs_index_h][j]])/SCALE_HEIGHT);
sum_of_interpol_weights += 1/pow(distance + EPSILON, INTERPOL_EXP);
// the result: once the last temperature dof has been processed, normalize the first half of the Jacobian row
if (j == NO_OF_REL_MODEL_DOFS_PER_OBS/2 - 1)
{
// loop over all relevant gridpoints
for (int k = 0; k < NO_OF_REL_MODEL_DOFS_PER_OBS/2; ++k)
{
// we have to divide by the sum of weights here
obs_op_jacobian_reduced_matrix[obs_index][k] = weights_vector[k]/sum_of_interpol_weights;
}
}
}
// How is the surface pressure affected by the density in the lowest layer?
else
{
// as a new interpolation will be conducted now, the sum_of_interpol_weights variable has to be reset to zero
if (j == NO_OF_REL_MODEL_DOFS_PER_OBS/2)
{
sum_of_interpol_weights = 0;
}
// radius does not matter here
distance = calculate_distance_h(lat_used_obs[obs_index], lon_used_obs[obs_index],
lat_model[rel_h_index_vector[obs_index_h][j - NO_OF_REL_MODEL_DOFS_PER_OBS/2]], lon_model[rel_h_index_vector[obs_index_h][j - NO_OF_REL_MODEL_DOFS_PER_OBS/2]], 1);
// now we know which gridpoint is relevant to this observation
relevant_model_dofs_matrix[obs_index][j] = (closest_vert_index + 1)*NO_OF_SCALARS_H + rel_h_index_vector[obs_index_h][j - NO_OF_REL_MODEL_DOFS_PER_OBS/2];
// interpolation weight times dp_sfc/drho = R_D*T (barometric height formula);
// NOTE(review): the "- NO_OF_SCALARS_H" offset presumably addresses the temperature dof relative to the
// density dof in the background vector layout — confirm against the state-vector layout
weights_vector[j] = 1/pow(distance + EPSILON, INTERPOL_EXP)
*R_D*background[relevant_model_dofs_matrix[obs_index][j] - NO_OF_SCALARS_H]
*exp(-(z_used_obs[NO_OF_CHOSEN_OBSERVATIONS_MOIST + obs_index]
- z_model[(NO_OF_LAYERS - 1)*NO_OF_SCALARS_H + rel_h_index_vector[obs_index_h][j - NO_OF_REL_MODEL_DOFS_PER_OBS/2]])/SCALE_HEIGHT);
// interpolation to the surface pressure
interpolated_model[obs_index] += weights_vector[j]*background[relevant_model_dofs_matrix[obs_index][j]];
sum_of_interpol_weights += 1/pow(distance + EPSILON, INTERPOL_EXP);
}
}
// the result
// the interpolation to the surface pressure
interpolated_model[obs_index] = interpolated_model[obs_index]/sum_of_interpol_weights;
// loop over all relevant gridpoints (second half of the Jacobian row: density dofs)
for (int k = NO_OF_REL_MODEL_DOFS_PER_OBS/2; k < NO_OF_REL_MODEL_DOFS_PER_OBS; ++k)
{
// we have to divide by the sum of weights here
obs_op_jacobian_reduced_matrix[obs_index][k] = weights_vector[k]/sum_of_interpol_weights;
}
}
}
free(rel_h_index_vector);
// returning 0 indicating success
return 0;
}
int obs_op_setup_wind(double interpolated_model[], double obs_op_jacobian_reduced_matrix[][NO_OF_REL_MODEL_DOFS_PER_OBS], int relevant_model_dofs_matrix[][NO_OF_REL_MODEL_DOFS_PER_OBS], double lat_used_obs[], double lon_used_obs[], double z_used_obs[], double lat_model[], double lon_model[], double z_model[], double directions[], double background[])
{
/*
Same as obs_op_setup, only for the wind observations.
The model carries horizontal winds as normal components on C-grid edges; "directions" holds the orientation of each
edge, so the u-observations (first half) project with cos(direction) and the v-observations (second half) with
sin(direction). Wind observations are stored after the dry and moist observations, hence the
NO_OF_CHOSEN_OBSERVATIONS_DRY + NO_OF_CHOSEN_OBSERVATIONS_MOIST offsets into lat_used_obs/lon_used_obs/z_used_obs.
Returns 0 to indicate success.
*/
// finding the NO_OF_REL_MODEL_DOFS_PER_OBS/2 closest grid points (horizontally) for each observation
// the vector containing the relevant horizontal model indices for each observation
int (*rel_h_index_vector)[NO_OF_REL_MODEL_DOFS_PER_OBS/2] = malloc(sizeof(int[NO_OF_CHOSEN_WIND_POINTS_PER_LAYER_OBS][NO_OF_REL_MODEL_DOFS_PER_OBS/2]));
#pragma omp parallel for
for (int i = 0; i < NO_OF_CHOSEN_WIND_POINTS_PER_LAYER_OBS; ++i)
{
// the vector containing the horizontal distances between the observation at hand and each horizontal model gridpoint
double *dist_vector = malloc(NO_OF_VECTORS_H*sizeof(double));
// filling up the dist_vector (unit radius: only relative distances matter here)
for (int j = 0; j < NO_OF_VECTORS_H; ++j)
{
dist_vector[j] = calculate_distance_h(lat_used_obs[NO_OF_CHOSEN_OBSERVATIONS_DRY + NO_OF_CHOSEN_OBSERVATIONS_MOIST + i],
lon_used_obs[NO_OF_CHOSEN_OBSERVATIONS_DRY + NO_OF_CHOSEN_OBSERVATIONS_MOIST + i], lat_model[j], lon_model[j], 1);
}
// finding the NO_OF_REL_MODEL_DOFS_PER_OBS/2 closest points by repeated minimum search;
// each found entry is overwritten with M_PI + EPSILON (larger than any distance on the unit sphere)
// so that it cannot be found a second time
for (int j = 0; j < NO_OF_REL_MODEL_DOFS_PER_OBS/2; ++j)
{
rel_h_index_vector[i][j] = find_min_index(dist_vector, NO_OF_VECTORS_H);
dist_vector[rel_h_index_vector[i][j]] = M_PI + EPSILON;
}
free(dist_vector);
}
int layer_index, obs_index_h;
// finally setting up the reduced observations operator
#pragma omp parallel for private(layer_index, obs_index_h)
for (int obs_index = 0; obs_index < NO_OF_CHOSEN_OBSERVATIONS_WIND; ++obs_index)
{
// just a helper variable for calculating obs_index_h; gets >= NO_OF_LAYERS but won't be used
layer_index = obs_index/NO_OF_CHOSEN_WIND_POINTS_PER_LAYER_OBS;
obs_index_h = obs_index - layer_index*NO_OF_CHOSEN_WIND_POINTS_PER_LAYER_OBS;
// the vector containing the vertical distance between the observation at hand and the model gridpoints
double vert_distance_vector[NO_OF_LAYERS];
// the vector containing preliminary interpolation weights
double weights_vector[NO_OF_REL_MODEL_DOFS_PER_OBS];
// the vector containing preliminary interpolation weights with sin or cos prefactors accounting for the direction of the
// vectors on the C-grid
double weights_vector_with_dir[NO_OF_REL_MODEL_DOFS_PER_OBS];
// the closest vertical indices
int closest_vert_index, other_vert_index;
double sum_of_interpol_weights, distance, closest_vert_weight, other_vert_weight, distance_factor, direction;
sum_of_interpol_weights = 0;
interpolated_model[obs_index] = 0;
// loop over all relevant horizontal model gridpoints
for (int j = 0; j < NO_OF_REL_MODEL_DOFS_PER_OBS/2; ++j)
{
// finding out which layer is the closest to the observation
// (NO_OF_SCALARS_H + k*NO_OF_VECTORS_PER_LAYER addresses the horizontal vector points within layer k)
for (int k = 0; k < NO_OF_LAYERS; ++k)
{
vert_distance_vector[k] = fabs(z_model[NO_OF_SCALARS_H + k*NO_OF_VECTORS_PER_LAYER + rel_h_index_vector[obs_index_h][j]]
- z_used_obs[NO_OF_CHOSEN_OBSERVATIONS_DRY + NO_OF_CHOSEN_OBSERVATIONS_MOIST + obs_index]);
}
closest_vert_index = find_min_index(vert_distance_vector, NO_OF_LAYERS);
// vertical interpolation
// firstly setting the other vertical index to the layer below
other_vert_index = closest_vert_index + 1;
// if the closest model point is below the observation, the next higher point is taken into account for the interpolation
if (z_model[NO_OF_SCALARS_H + closest_vert_index*NO_OF_VECTORS_PER_LAYER + rel_h_index_vector[obs_index_h][j]]
< z_used_obs[NO_OF_CHOSEN_OBSERVATIONS_DRY + NO_OF_CHOSEN_OBSERVATIONS_MOIST + obs_index])
{
other_vert_index = closest_vert_index - 1;
}
// if the observation is below the lowest layer of the model: extrapolate from the two lowest layers
if (other_vert_index == NO_OF_LAYERS)
{
other_vert_index = NO_OF_LAYERS - 2;
// linear extrapolation weight (can exceed 1 below the lowest layer)
closest_vert_weight = 1 - (z_used_obs[NO_OF_CHOSEN_OBSERVATIONS_DRY + NO_OF_CHOSEN_OBSERVATIONS_MOIST + obs_index]
- z_model[NO_OF_SCALARS_H + closest_vert_index*NO_OF_VECTORS_PER_LAYER + rel_h_index_vector[obs_index_h][j]])
/(z_model[NO_OF_SCALARS_H + other_vert_index*NO_OF_VECTORS_PER_LAYER + rel_h_index_vector[obs_index_h][j]]
- z_model[NO_OF_SCALARS_H + closest_vert_index*NO_OF_VECTORS_PER_LAYER + rel_h_index_vector[obs_index_h][j]]);
}
else
{
// standard linear interpolation weight between the two bracketing layers
closest_vert_weight = fabs(z_model[NO_OF_SCALARS_H + other_vert_index*NO_OF_VECTORS_PER_LAYER + rel_h_index_vector[obs_index_h][j]]
- z_used_obs[NO_OF_CHOSEN_OBSERVATIONS_DRY + NO_OF_CHOSEN_OBSERVATIONS_MOIST + obs_index])
/fabs(z_model[NO_OF_SCALARS_H + closest_vert_index*NO_OF_VECTORS_PER_LAYER + rel_h_index_vector[obs_index_h][j]]
- z_model[NO_OF_SCALARS_H + other_vert_index*NO_OF_VECTORS_PER_LAYER + rel_h_index_vector[obs_index_h][j]]);
}
other_vert_weight = 1 - closest_vert_weight;
// now we know which gridpoint is relevant to this observation
// the closest vertical point
relevant_model_dofs_matrix[obs_index][j] = closest_vert_index*NO_OF_VECTORS_H + rel_h_index_vector[obs_index_h][j];
// the second closest vertical point
relevant_model_dofs_matrix[obs_index][j + NO_OF_REL_MODEL_DOFS_PER_OBS/2] = other_vert_index*NO_OF_VECTORS_H + rel_h_index_vector[obs_index_h][j];
// radius does not matter here
distance = calculate_distance_h(lat_used_obs[NO_OF_CHOSEN_OBSERVATIONS_DRY + NO_OF_CHOSEN_OBSERVATIONS_MOIST + obs_index],
lon_used_obs[NO_OF_CHOSEN_OBSERVATIONS_DRY + NO_OF_CHOSEN_OBSERVATIONS_MOIST + obs_index],
lat_model[rel_h_index_vector[obs_index_h][j]], lon_model[rel_h_index_vector[obs_index_h][j]], 1);
// interpolation weights (1/r-interpolation; EPSILON guards against division by zero)
distance_factor = 1.0/pow(distance + EPSILON, INTERPOL_EXP);
direction = directions[rel_h_index_vector[obs_index_h][j]];
// u: zonal component, projected with cos(direction)
if (obs_index < NO_OF_CHOSEN_OBSERVATIONS_WIND/2)
{
weights_vector[j] = closest_vert_weight*distance_factor;
weights_vector[j + NO_OF_REL_MODEL_DOFS_PER_OBS/2] = other_vert_weight*distance_factor;
weights_vector_with_dir[j] = weights_vector[j]*cos(direction);
weights_vector_with_dir[j + NO_OF_REL_MODEL_DOFS_PER_OBS/2] = weights_vector[j + NO_OF_REL_MODEL_DOFS_PER_OBS/2]*cos(direction);
}
// v: meridional component, projected with sin(direction)
else
{
weights_vector[j] = closest_vert_weight*distance_factor;
weights_vector[j + NO_OF_REL_MODEL_DOFS_PER_OBS/2] = other_vert_weight*distance_factor;
weights_vector_with_dir[j] = weights_vector[j]*sin(direction);
weights_vector_with_dir[j + NO_OF_REL_MODEL_DOFS_PER_OBS/2] = weights_vector[j + NO_OF_REL_MODEL_DOFS_PER_OBS/2]*sin(direction);
}
interpolated_model[obs_index] += weights_vector_with_dir[j]*background[relevant_model_dofs_matrix[obs_index][j]];
interpolated_model[obs_index] += weights_vector_with_dir[j + NO_OF_REL_MODEL_DOFS_PER_OBS/2]*background[relevant_model_dofs_matrix[obs_index][j + NO_OF_REL_MODEL_DOFS_PER_OBS/2]];
sum_of_interpol_weights += weights_vector[j];
sum_of_interpol_weights += weights_vector[j + NO_OF_REL_MODEL_DOFS_PER_OBS/2];
}
for (int k = 0; k < NO_OF_REL_MODEL_DOFS_PER_OBS; ++k)
{
// we have to divide by the sum of weights here;
// NOTE(review): the factor 0.5 presumably compensates for the directional projection halving the
// effective weight on average — confirm against the derivation of the wind operator
obs_op_jacobian_reduced_matrix[obs_index][k] = weights_vector_with_dir[k]/(0.5*sum_of_interpol_weights);
}
interpolated_model[obs_index] = interpolated_model[obs_index]/(0.5*sum_of_interpol_weights);
}
free(rel_h_index_vector);
// returning 0 indicating success
return 0;
}
|
channel.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC H H AAA N N N N EEEEE L %
% C H H A A NN N NN N E L %
% C HHHHH AAAAA N N N N N N EEE L %
% C H H A A N NN N NN E L %
% CCCC H H A A N N N N EEEEE LLLLL %
% %
% %
% MagickCore Image Channel Methods %
% %
% Software Design %
% Cristy %
% December 2003 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/cache-private.h"
#include "magick/channel.h"
#include "magick/color-private.h"
#include "magick/colorspace-private.h"
#include "magick/composite-private.h"
#include "magick/exception-private.h"
#include "magick/enhance.h"
#include "magick/image.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel-accessor.h"
#include "magick/resource_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/token.h"
#include "magick/utility.h"
#include "magick/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m b i n e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CombineImages() combines one or more images into a single image. The
% grayscale value of the pixels of each image in the sequence is assigned in
% order to the specified channels of the combined image. The typical
% ordering would be image 1 => Red, 2 => Green, 3 => Blue, etc.
%
% The format of the CombineImages method is:
%
% Image *CombineImages(const Image *image,const ChannelType channel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CombineImages(const Image *image,const ChannelType channel,
  ExceptionInfo *exception)
{
#define CombineImageTag "Combine/Image"

  CacheView
    *combine_view;

  const Image
    *next;

  Image
    *combine_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Ensure the images are the same size.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  for (next=image; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    if ((next->columns != image->columns) || (next->rows != image->rows))
      ThrowImageException(OptionError,"ImagesAreNotTheSameSize");
  }
  combine_image=CloneImage(image,0,0,MagickTrue,exception);
  if (combine_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(combine_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&combine_image->exception);
      combine_image=DestroyImage(combine_image);
      return((Image *) NULL);
    }
  if (IssRGBCompatibleColorspace(image->colorspace) != MagickFalse)
    {
      /*
        A gamma of 1.0 indicates linear RGB; otherwise assume sRGB.
      */
      if (fabs(image->gamma-1.0) <= MagickEpsilon)
        (void) SetImageColorspace(combine_image,RGBColorspace);
      else
        (void) SetImageColorspace(combine_image,sRGBColorspace);
    }
  if ((channel & OpacityChannel) != 0)
    combine_image->matte=MagickTrue;
  (void) SetImageBackgroundColor(combine_image);
  /*
    Combine images: the grayscale intensity of each image in the list is
    assigned, in order, to the requested channels of the combined image.
  */
  status=MagickTrue;
  progress=0;
  combine_view=AcquireAuthenticCacheView(combine_image,exception);
  for (y=0; y < (ssize_t) combine_image->rows; y++)
  {
    CacheView
      *image_view;

    const Image
      *next;

    PixelPacket
      *pixels;

    register const PixelPacket
      *magick_restrict p;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(combine_view,0,y,combine_image->columns,
      1,exception);
    if (pixels == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    next=image;
    if (((channel & RedChannel) != 0) && (next != (Image *) NULL))
      {
        image_view=AcquireVirtualCacheView(next,exception);
        p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
        if (p == (const PixelPacket *) NULL)
          {
            /*
              Bug fix: destroy the cache view before bailing out (the original
              code leaked it) and record the failure instead of silently
              skipping the row, matching the authentic-pixels failure path.
            */
            image_view=DestroyCacheView(image_view);
            status=MagickFalse;
            continue;
          }
        q=pixels;
        for (x=0; x < (ssize_t) combine_image->columns; x++)
        {
          /*
            NOTE(review): intensity is computed with the first image's
            intensity method even though p comes from next — confirm this is
            intended for lists with differing intensity settings.
          */
          SetPixelRed(q,ClampToQuantum(GetPixelIntensity(image,p)));
          p++;
          q++;
        }
        image_view=DestroyCacheView(image_view);
        next=GetNextImageInList(next);
      }
    if (((channel & GreenChannel) != 0) && (next != (Image *) NULL))
      {
        image_view=AcquireVirtualCacheView(next,exception);
        p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
        if (p == (const PixelPacket *) NULL)
          {
            image_view=DestroyCacheView(image_view);
            status=MagickFalse;
            continue;
          }
        q=pixels;
        for (x=0; x < (ssize_t) combine_image->columns; x++)
        {
          SetPixelGreen(q,ClampToQuantum(GetPixelIntensity(image,p)));
          p++;
          q++;
        }
        image_view=DestroyCacheView(image_view);
        next=GetNextImageInList(next);
      }
    if (((channel & BlueChannel) != 0) && (next != (Image *) NULL))
      {
        image_view=AcquireVirtualCacheView(next,exception);
        p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
        if (p == (const PixelPacket *) NULL)
          {
            image_view=DestroyCacheView(image_view);
            status=MagickFalse;
            continue;
          }
        q=pixels;
        for (x=0; x < (ssize_t) combine_image->columns; x++)
        {
          SetPixelBlue(q,ClampToQuantum(GetPixelIntensity(image,p)));
          p++;
          q++;
        }
        image_view=DestroyCacheView(image_view);
        next=GetNextImageInList(next);
      }
    if (((channel & OpacityChannel) != 0) && (next != (Image *) NULL))
      {
        image_view=AcquireVirtualCacheView(next,exception);
        p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
        if (p == (const PixelPacket *) NULL)
          {
            image_view=DestroyCacheView(image_view);
            status=MagickFalse;
            continue;
          }
        q=pixels;
        for (x=0; x < (ssize_t) combine_image->columns; x++)
        {
          SetPixelAlpha(q,ClampToQuantum(GetPixelIntensity(image,p)));
          p++;
          q++;
        }
        image_view=DestroyCacheView(image_view);
        next=GetNextImageInList(next);
      }
    if (((channel & IndexChannel) != 0) &&
        (image->colorspace == CMYKColorspace) && (next != (Image *) NULL))
      {
        IndexPacket
          *indexes;

        image_view=AcquireVirtualCacheView(next,exception);
        p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
        if (p == (const PixelPacket *) NULL)
          {
            image_view=DestroyCacheView(image_view);
            status=MagickFalse;
            continue;
          }
        indexes=GetCacheViewAuthenticIndexQueue(combine_view);
        for (x=0; x < (ssize_t) combine_image->columns; x++)
        {
          SetPixelIndex(indexes+x,ClampToQuantum(GetPixelIntensity(image,p)));
          p++;
        }
        image_view=DestroyCacheView(image_view);
        next=GetNextImageInList(next);
      }
    if (SyncCacheViewAuthenticPixels(combine_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,CombineImageTag,progress++,
          combine_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  combine_view=DestroyCacheView(combine_view);
  if (IsGrayColorspace(combine_image->colorspace) != MagickFalse)
    (void) TransformImageColorspace(combine_image,sRGBColorspace);
  if (status == MagickFalse)
    combine_image=DestroyImage(combine_image);
  return(combine_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e A l p h a C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageAlphaChannel() returns MagickFalse if the image alpha channel is
% not activated. That is, the image is RGB rather than RGBA or CMYK rather
% than CMYKA.
%
% The format of the GetImageAlphaChannel method is:
%
% MagickBooleanType GetImageAlphaChannel(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType GetImageAlphaChannel(const Image *image)
{
  /*
    Report whether the image's alpha (matte) channel is active.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  return(image->matte);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e p a r a t e I m a g e C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SeparateImageChannel() separates a channel from the image and returns it as
% a grayscale image. A channel is a particular color component of each pixel
% in the image.
%
% The format of the SeparateImageChannel method is:
%
% MagickBooleanType SeparateImageChannel(Image *image,
% const ChannelType channel)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: Identify which channel to extract: RedChannel, GreenChannel,
% BlueChannel, OpacityChannel, CyanChannel, MagentaChannel,
% YellowChannel, or BlackChannel.
%
*/
MagickExport Image *SeparateImage(const Image *image,const ChannelType channel,
  ExceptionInfo *exception)
{
  Image
    *separate_image;

  /*
    Clone the image, then reduce the clone in place to the requested channel;
    on failure the clone is destroyed and NULL is returned.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  separate_image=CloneImage(image,0,0,MagickTrue,exception);
  if (separate_image == (Image *) NULL)
    return((Image *) NULL);
  if (SeparateImageChannel(separate_image,channel) == MagickFalse)
    separate_image=DestroyImage(separate_image);
  return(separate_image);
}
MagickExport MagickBooleanType SeparateImageChannel(Image *image,
const ChannelType channel)
{
#define SeparateImageTag "Separate/Image"

CacheView
*image_view;

ExceptionInfo
*exception;

MagickBooleanType
status;

MagickOffsetType
progress;

ssize_t
y;

/*
Reduce the image in place to a grayscale rendition of the requested channel
by replicating that channel's value into the red, green, and blue components
(GrayChannels instead writes the pixel intensity into the alpha channel).
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
if (channel == GrayChannels)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
/*
Separate image channels.
*/
status=MagickTrue;
progress=0;
exception=(&image->exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register IndexPacket
*magick_restrict indexes;

register PixelPacket
*magick_restrict q;

register ssize_t
x;

if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
switch (channel)
{
case RedChannel:
{
/*
Replicate red into green and blue.
*/
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelGreen(q,GetPixelRed(q));
SetPixelBlue(q,GetPixelRed(q));
q++;
}
break;
}
case GreenChannel:
{
/*
Replicate green into red and blue.
*/
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(q,GetPixelGreen(q));
SetPixelBlue(q,GetPixelGreen(q));
q++;
}
break;
}
case BlueChannel:
{
/*
Replicate blue into red and green.
*/
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(q,GetPixelBlue(q));
SetPixelGreen(q,GetPixelBlue(q));
q++;
}
break;
}
case OpacityChannel:
{
/*
Replicate opacity into red, green, and blue.
*/
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(q,GetPixelOpacity(q));
SetPixelGreen(q,GetPixelOpacity(q));
SetPixelBlue(q,GetPixelOpacity(q));
q++;
}
break;
}
case BlackChannel:
{
/*
The black (K) channel lives in the index queue; only meaningful for
PseudoClass or CMYK images, so skip the row otherwise.
*/
if ((image->storage_class != PseudoClass) &&
(image->colorspace != CMYKColorspace))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(q,GetPixelIndex(indexes+x));
SetPixelGreen(q,GetPixelIndex(indexes+x));
SetPixelBlue(q,GetPixelIndex(indexes+x));
q++;
}
break;
}
case TrueAlphaChannel:
{
/*
Replicate alpha (the complement of opacity) into red, green, and blue.
*/
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(q,GetPixelAlpha(q));
SetPixelGreen(q,GetPixelAlpha(q));
SetPixelBlue(q,GetPixelAlpha(q));
q++;
}
break;
}
case GrayChannels:
{
/*
Store the pixel intensity in the alpha channel; RGB is left untouched.
*/
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelAlpha(q,ClampToQuantum(GetPixelIntensity(image,q)));
q++;
}
break;
}
default:
break;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;

/* progress is shared across threads, so the increment must be serialized */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_SeparateImageChannel)
#endif
proceed=SetImageProgress(image,SeparateImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
/* every case except GrayChannels yields a grayscale image without alpha */
if (channel != GrayChannels)
{
image->matte=MagickFalse;
image->intensity=Rec709LuminancePixelIntensityMethod;
(void) SetImageColorspace(image,LinearGRAYColorspace);
}
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e p a r a t e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SeparateImages() returns a separate grayscale image for each channel
% specified.
%
% The format of the SeparateImages method is:
%
% MagickBooleanType SeparateImages(const Image *image,
% const ChannelType channel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: Identify which channels to extract: RedChannel, GreenChannel,
% BlueChannel, OpacityChannel, CyanChannel, MagentaChannel,
% YellowChannel, or BlackChannel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SeparateImages(const Image *image,const ChannelType channel,
  ExceptionInfo *exception)
{
  Image
    *images,
    *separate_image;

  /*
    Return a separate grayscale image for each channel requested in the
    channel mask.  Bug fix: the original code passed an unchecked CloneImage()
    result to SeparateImageChannel(), which dereferences it — a NULL clone
    (e.g. under resource exhaustion) crashed in release builds.  Failed clones
    are now skipped; the error is already recorded in `exception`.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  images=NewImageList();
  if ((channel & RedChannel) != 0)
    {
      separate_image=CloneImage(image,0,0,MagickTrue,exception);
      if (separate_image != (Image *) NULL)
        {
          (void) SeparateImageChannel(separate_image,RedChannel);
          AppendImageToList(&images,separate_image);
        }
    }
  if ((channel & GreenChannel) != 0)
    {
      separate_image=CloneImage(image,0,0,MagickTrue,exception);
      if (separate_image != (Image *) NULL)
        {
          (void) SeparateImageChannel(separate_image,GreenChannel);
          AppendImageToList(&images,separate_image);
        }
    }
  if ((channel & BlueChannel) != 0)
    {
      separate_image=CloneImage(image,0,0,MagickTrue,exception);
      if (separate_image != (Image *) NULL)
        {
          (void) SeparateImageChannel(separate_image,BlueChannel);
          AppendImageToList(&images,separate_image);
        }
    }
  if (((channel & BlackChannel) != 0) && (image->colorspace == CMYKColorspace))
    {
      separate_image=CloneImage(image,0,0,MagickTrue,exception);
      if (separate_image != (Image *) NULL)
        {
          (void) SeparateImageChannel(separate_image,BlackChannel);
          AppendImageToList(&images,separate_image);
        }
    }
  if ((channel & AlphaChannel) != 0)
    {
      separate_image=CloneImage(image,0,0,MagickTrue,exception);
      if (separate_image != (Image *) NULL)
        {
          (void) SeparateImageChannel(separate_image,TrueAlphaChannel);
          AppendImageToList(&images,separate_image);
        }
    }
  return(images);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e A l p h a C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageAlphaChannel() activates, deactivates, resets, or sets the alpha
% channel.
%
% The format of the SetImageAlphaChannel method is:
%
% MagickBooleanType SetImageAlphaChannel(Image *image,
% const AlphaChannelType alpha_type)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o alpha_type: The alpha channel type: ActivateAlphaChannel,
% AssociateAlphaChannel, CopyAlphaChannel, Disassociate,
% DeactivateAlphaChannel, ExtractAlphaChannel, OpaqueAlphaChannel,
% ResetAlphaChannel, SetAlphaChannel, ShapeAlphaChannel, and
% TransparentAlphaChannel.
%
*/
MagickExport MagickBooleanType SetImageAlphaChannel(Image *image,
  const AlphaChannelType alpha_type)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  exception=(&image->exception);
  status=MagickTrue;
  switch (alpha_type)
  {
    case ActivateAlphaChannel:
    {
      /*
        Enable the alpha channel; existing opacity values are kept as-is.
      */
      image->matte=MagickTrue;
      break;
    }
    case AssociateAlphaChannel:
    {
      /*
        Associate alpha: premultiply each color channel by the
        normalized alpha value, then mark transparency as off.
      */
      status=SetImageStorageClass(image,DirectClass);
      if (status == MagickFalse)
        break;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register PixelPacket
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          double
            gamma;

          /* Scale each color channel by alpha in [0,1]. */
          gamma=QuantumScale*GetPixelAlpha(q);
          SetPixelRed(q,ClampToQuantum(gamma*GetPixelRed(q)));
          SetPixelGreen(q,ClampToQuantum(gamma*GetPixelGreen(q)));
          SetPixelBlue(q,ClampToQuantum(gamma*GetPixelBlue(q)));
          q++;
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      image->matte=MagickFalse;
      break;
    }
    case BackgroundAlphaChannel:
    {
      IndexPacket
        index;

      /* NOTE(review): this local status shadows the function-level
         status; the case returns it directly below, so the final
         SyncImagePixelCache() at the end of the function is skipped
         for this alpha type. */
      MagickBooleanType
        status;

      MagickPixelPacket
        background;

      PixelPacket
        pixel;

      /*
        Set transparent pixels to background color.
      */
      if (image->matte == MagickFalse)
        break;
      status=SetImageStorageClass(image,DirectClass);
      if (status == MagickFalse)
        break;
      GetMagickPixelPacket(image,&background);
      SetMagickPixelPacket(image,&image->background_color,(const IndexPacket *)
        NULL,&background);
      if (image->colorspace == CMYKColorspace)
        ConvertRGBToCMYK(&background);
      index=0;
      SetPixelPacket(image,&background,&pixel,&index);
      status=MagickTrue;
      exception=(&image->exception);
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register IndexPacket
          *magick_restrict indexes;

        register PixelPacket
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          /* Only fully transparent pixels are replaced. */
          if (q->opacity == TransparentOpacity)
            {
              SetPixelRed(q,pixel.red);
              SetPixelGreen(q,pixel.green);
              SetPixelBlue(q,pixel.blue);
            }
          q++;
        }
        if (image->colorspace == CMYKColorspace)
          {
            indexes=GetCacheViewAuthenticIndexQueue(image_view);
            for (x=0; x < (ssize_t) image->columns; x++)
              SetPixelIndex(indexes+x,index);
          }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      return(status);
    }
    case CopyAlphaChannel:
    case ShapeAlphaChannel:
    {
      /*
        Special usage case for SeparateImageChannel(): copy grayscale color to
        the alpha channel.
      */
      status=SeparateImageChannel(image,GrayChannels);
      image->matte=MagickTrue; /* make sure transparency is now on! */
      if (alpha_type == ShapeAlphaChannel)
        {
          MagickPixelPacket
            background;

          /*
            Reset all color channels to background color.
          */
          GetMagickPixelPacket(image,&background);
          SetMagickPixelPacket(image,&(image->background_color),(IndexPacket *)
            NULL,&background);
          (void) LevelColorsImage(image,&background,&background,MagickTrue);
        }
      break;
    }
    case DeactivateAlphaChannel:
    {
      /*
        Disable the alpha channel; opacity values are kept but ignored.
      */
      image->matte=MagickFalse;
      break;
    }
    case DisassociateAlphaChannel:
    {
      /*
        Undo alpha premultiplication: divide each color channel by the
        normalized alpha value.
      */
      status=SetImageStorageClass(image,DirectClass);
      if (status == MagickFalse)
        break;
      image->matte=MagickTrue;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register PixelPacket
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          double
            alpha,
            gamma;

          /* PerceptibleReciprocal() protects against division by a
             (near-)zero alpha. */
          alpha=QuantumScale*GetPixelAlpha(q);
          gamma=PerceptibleReciprocal(alpha);
          SetPixelRed(q,ClampToQuantum(gamma*GetPixelRed(q)));
          SetPixelGreen(q,ClampToQuantum(gamma*GetPixelGreen(q)));
          SetPixelBlue(q,ClampToQuantum(gamma*GetPixelBlue(q)));
          q++;
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      image->matte=MagickFalse;
      break;
    }
    case ExtractAlphaChannel:
    {
      /*
        Replace the color channels with the alpha channel rendered as
        grayscale, then turn transparency off.
      */
      status=SeparateImageChannel(image,TrueAlphaChannel);
      image->matte=MagickFalse;
      break;
    }
    case RemoveAlphaChannel:
    case FlattenAlphaChannel:
    {
      IndexPacket
        index;

      MagickPixelPacket
        background;

      PixelPacket
        pixel;

      /*
        Flatten image pixels over the background pixels.
      */
      if (image->matte == MagickFalse)
        break;
      if (SetImageStorageClass(image,DirectClass) == MagickFalse)
        break;
      GetMagickPixelPacket(image,&background);
      SetMagickPixelPacket(image,&image->background_color,(const IndexPacket *)
        NULL,&background);
      if (image->colorspace == CMYKColorspace)
        ConvertRGBToCMYK(&background);
      (void) memset(&pixel,0,sizeof(pixel));
      index=0;
      SetPixelPacket(image,&background,&pixel,&index);
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register IndexPacket
          *magick_restrict indexes;

        register PixelPacket
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          double
            gamma,
            opacity;

          /* "Over" composite of the pixel on the background color. */
          gamma=1.0-QuantumScale*QuantumScale*q->opacity*pixel.opacity;
          opacity=(double) QuantumRange*(1.0-gamma);
          gamma=PerceptibleReciprocal(gamma);
          q->red=ClampToQuantum(gamma*MagickOver_((MagickRealType) q->red,
            (MagickRealType) q->opacity,(MagickRealType) pixel.red,
            (MagickRealType) pixel.opacity));
          q->green=ClampToQuantum(gamma*MagickOver_((MagickRealType) q->green,
            (MagickRealType) q->opacity,(MagickRealType) pixel.green,
            (MagickRealType) pixel.opacity));
          q->blue=ClampToQuantum(gamma*MagickOver_((MagickRealType) q->blue,
            (MagickRealType) q->opacity,(MagickRealType) pixel.blue,
            (MagickRealType) pixel.opacity));
          q->opacity=ClampToQuantum(opacity);
          q++;
        }
        if (image->colorspace == CMYKColorspace)
          {
            indexes=GetCacheViewAuthenticIndexQueue(image_view);
            for (x=0; x < (ssize_t) image->columns; x++)
              SetPixelIndex(indexes+x,index);
          }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      /* NOTE(review): returns directly, bypassing the final
         SyncImagePixelCache() below. */
      return(status);
    }
    case ResetAlphaChannel: /* deprecated */
    case OpaqueAlphaChannel:
    {
      status=SetImageOpacity(image,OpaqueOpacity);
      break;
    }
    case SetAlphaChannel:
    {
      /* Only initialize the alpha channel when it is not already on. */
      if (image->matte == MagickFalse)
        status=SetImageOpacity(image,OpaqueOpacity);
      break;
    }
    case TransparentAlphaChannel:
    {
      status=SetImageOpacity(image,TransparentOpacity);
      break;
    }
    case UndefinedAlphaChannel:
      break;
  }
  if (status == MagickFalse)
    return(status);
  /* Flush the updated pixels to the pixel cache. */
  return(SyncImagePixelCache(image,&image->exception));
}
|
zlascl.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> s d c
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_types.h"
#include <math.h>
/******************************************************************************/
/***************************************************************************//**
 * Scales a general, upper- or lower-triangular m-by-n matrix by the ratio
 * cto/cfrom (LAPACK zlascl equivalent), using the tile async kernel.
 *
 * @param[in]     uplo  PlasmaGeneral, PlasmaUpper or PlasmaLower.
 * @param[in]     cfrom Denominator of the scale factor; must be nonzero
 *                      and not NaN.
 * @param[in]     cto   Numerator of the scale factor; must not be NaN.
 * @param[in]     m     Number of rows (m >= 0).
 * @param[in]     n     Number of columns (n >= 0).
 * @param[in,out] pA    LAPACK-layout matrix, scaled in place.
 * @param[in]     lda   Leading dimension of pA (lda >= max(1,m)).
 *
 * @retval PlasmaSuccess on success, a negative value to flag the offending
 *         argument, or an error code from the runtime.
 ******************************************************************************/
int plasma_zlascl(plasma_enum_t uplo,
                  double cfrom, double cto,
                  int m, int n,
                  plasma_complex64_t *pA, int lda)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if ((uplo != PlasmaGeneral) &&
        (uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        return -1;
    }
    if (cfrom == 0.0 || isnan(cfrom)) {
        plasma_error("illegal value of cfrom");
        return -2;
    }
    if (isnan(cto)) {
        plasma_error("illegal value of cto");
        return -3;
    }
    if (m < 0) {
        plasma_error("illegal value of m");
        return -4;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -5;
    }
    if (lda < imax(1, m)) {
        plasma_error("illegal value of lda");
        return -7;
    }

    // quick return
    if (imin(n, m) == 0)
        return PlasmaSuccess;

    // Set tiling parameters.
    int nb = plasma->nb;

    // Create tile matrices.
    plasma_desc_t A;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        m, n, 0, 0, m, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_general_desc_create() failed");
        return retval;
    }

    // Create sequence.
    plasma_sequence_t *sequence = NULL;
    retval = plasma_sequence_create(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_create() failed");
        // Fix: release the tile matrix created above; this error path
        // previously leaked the descriptor and its tile storage.
        plasma_desc_destroy(&A);
        return retval;
    }

    // Initialize request.
    plasma_request_t request = PlasmaRequestInitializer;

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_zge2desc(pA, lda, A, sequence, &request);

        // Call tile async function.
        plasma_omp_zlascl(uplo, cfrom, cto, A, sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_zdesc2ge(A, pA, lda, sequence, &request);
    }
    // implicit synchronization

    // Free matrices in tile layout.
    plasma_desc_destroy(&A);

    // Return status.
    int status = sequence->status;
    plasma_sequence_destroy(sequence);
    return status;
}
/******************************************************************************/
/***************************************************************************//**
 * Asynchronous, tile-layout version of plasma_zlascl(): scales the tiled
 * matrix A by cto/cfrom. Errors are reported through the sequence/request
 * pair rather than a return value.
 *
 * @param[in]     uplo     PlasmaGeneral, PlasmaUpper or PlasmaLower.
 * @param[in]     cfrom    Denominator of the scale factor (nonzero, not NaN).
 * @param[in]     cto      Numerator of the scale factor (not NaN).
 * @param[in,out] A        Tile-layout matrix descriptor.
 * @param[in,out] sequence Sequence the task belongs to; must be non-NULL.
 * @param[in,out] request  Request to identify this call; must be non-NULL.
 ******************************************************************************/
void plasma_omp_zlascl(plasma_enum_t uplo,
                       double cfrom, double cto,
                       plasma_desc_t A,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Fix: validate sequence and request before anything uses them.
    // Previously these NULL checks came last and every earlier error path
    // called plasma_request_fail(sequence, ...), dereferencing a NULL
    // sequence/request when one was passed.
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        sequence->status = PlasmaErrorIllegalValue;
        return;
    }

    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if ((uplo != PlasmaGeneral) &&
        (uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (cfrom == 0.0 || isnan(cfrom)) {
        plasma_error("illegal value of cfrom");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (isnan(cto)) {
        plasma_error("illegal value of cto");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (imin(A.m, A.n) == 0)
        return;

    // Call the parallel function.
    plasma_pzlascl(uplo, cfrom, cto, A, sequence, request);
}
|
streamTriad.c | /*
* =======================================================================================
*
* Author: Jan Eitzinger (je), jan.eitzinger@fau.de
* Copyright (c) 2019 RRZE, University Erlangen-Nuremberg
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* =======================================================================================
*/
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <time.h>
#include <limits.h>
#include <float.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#define SIZE 20000000ull
#define NTIMES 3
#define ARRAY_ALIGNMENT 64
#define HLINE "----------------------------------------------------------------------------\n"
#ifndef MIN
#define MIN(x,y) ((x)<(y)?(x):(y))
#endif
#ifndef MAX
#define MAX(x,y) ((x)>(y)?(x):(y))
#endif
#ifndef ABS
#define ABS(a) ((a) >= 0 ? (a) : -(a))
#endif
extern double striad_seq(double*, const double*, const double*, const double*, int, int);
extern double striad_tp(double*, const double*, const double*, const double*, int, int);
extern double striad_ws(double*, const double*, const double*, const double*, int, int);
extern double getTimeStamp();
typedef double (*testFunc)(double*, const double*, const double*, const double*, int, int);
/*
 * Driver for the Schoenauer-triad benchmarks.
 * Usage: streamTriad <test type> <N>
 *   0 - sequential, 1 - OpenMP throughput, 2 - OpenMP worksharing.
 * Prints: "<data set size in kB> <MFLOP/s based on the best run>".
 */
int main (int argc, char** argv)
{
    size_t bytesPerWord = sizeof(double);
    size_t N;
    int type;
    size_t iter = 1;
    size_t scale = 1;          /* flop multiplier for throughput mode */
    double *a, *b, *c, *d;
    double mintime;
    double times[NTIMES];
    testFunc func;

    /* Parse command line: test variant and problem size. */
    if ( argc > 2 ) {
        type = atoi(argv[1]);
        N = (size_t) atoll(argv[2]);  /* atoll: allow N beyond INT_MAX */
    } else {
        printf("Usage: %s <test type> <N>\n",argv[0]);
        printf("Test types: 0 - sequential, 1 - OpenMP throughput, 2 - OpenMP worksharing\n");
        exit(EXIT_SUCCESS);
    }

    switch ( type ) {
        case 0:
            func = striad_seq;
            break;
        case 1:
            func = striad_tp;
            /* Throughput mode: every thread streams the full N elements,
             * so the flop count scales with the thread count. */
#ifdef _OPENMP
#pragma omp parallel
            {
#pragma omp single
                scale = omp_get_num_threads();
            }
#endif
            break;
        case 2:
            func = striad_ws;
            break;
        default:
            printf("Unknown test type: %d\n", type);
            exit(EXIT_FAILURE);
    }

    /* Fix: the return codes of posix_memalign were silently ignored;
     * on failure the pointers are indeterminate and must not be used. */
    if (posix_memalign((void**) &a, ARRAY_ALIGNMENT, N * bytesPerWord) != 0 ||
        posix_memalign((void**) &b, ARRAY_ALIGNMENT, N * bytesPerWord) != 0 ||
        posix_memalign((void**) &c, ARRAY_ALIGNMENT, N * bytesPerWord) != 0 ||
        posix_memalign((void**) &d, ARRAY_ALIGNMENT, N * bytesPerWord) != 0) {
        printf("Allocation of benchmark arrays failed\n");
        exit(EXIT_FAILURE);
    }

    mintime = FLT_MAX;

#if defined(_OPENMP) && defined(VERBOSE)
#pragma omp parallel
    {
#pragma omp single
        printf ("OpenMP enabled, running with %d threads\n",
                omp_get_num_threads());
    }
#endif

    /* Parallel first-touch initialization of the arrays. */
#pragma omp parallel for
    for (size_t i=0; i<N; i++) {
        a[i] = 2.0;
        b[i] = 1.0;
        c[i] = 0.8;
        d[i] = 1.01;
    }

    /* Calibrate iter so a single measurement runs long enough to time. */
    iter = 5;
    times[0] = 0.0;
    times[1] = 0.0;

    while ( times[0] < 0.3 ){
        times[0] = func(a, b, c, d, N, iter);
        if ( times[0] > 0.1 ) break;
        double delta = times[0] - times[1];
        /* Fix: guard the extrapolation against a zero/negative delta,
         * which previously caused a division by zero, and make sure
         * iter never collapses to zero. */
        if (delta <= 0.0)
            delta = 1.0e-9;
        size_t next = (size_t)(iter * (0.3 / delta));
        iter = (next > iter) ? next : iter + 1;
        times[1] = times[0];
    }

    /* Measurement runs; the first one is treated as warm-up below. */
    for ( int k=0; k < NTIMES; k++) {
        times[k] = func(a, b, c, d, N, iter);
    }

    for (int k=1; k<NTIMES; k++) {
        mintime = MIN(mintime, times[k]);
    }

    double kB = (double) 4.0 * N * bytesPerWord;
    double flops = (double) 2.0 * N * iter * scale;
    printf("%.2f %.2f\n", 1.0E-03 * kB, 1.0E-06 * flops/mintime);

    /* Fix: the four benchmark arrays were never released. */
    free(a);
    free(b);
    free(c);
    free(d);

    return EXIT_SUCCESS;
}
/* Return the current wall-clock time in seconds, read from the
 * monotonic clock (immune to wall-time adjustments). */
double getTimeStamp()
{
    struct timespec now;

    clock_gettime(CLOCK_MONOTONIC, &now);
    return 1.e-9 * (double)now.tv_nsec + (double)now.tv_sec;
}
/* Sequential Schoenauer triad: performs a[i] = b[i] + d[i] * c[i] over
 * all N elements, repeated iter times.
 * Returns the elapsed wall-clock time in seconds. */
double striad_seq(
        double * restrict a,
        const double * restrict b,
        const double * restrict c,
        const double * restrict d,
        int N,
        int iter
        )
{
    const double start = getTimeStamp();

    for (int rep = 0; rep < iter; rep++) {
#pragma vector aligned
        for (int idx = 0; idx < N; idx++) {
            a[idx] = b[idx] + d[idx] * c[idx];
        }
        /* Fake data dependency so the compiler cannot collapse the
         * repeated sweeps into one. */
        if (a[N-1] > 2000) printf("Ai = %f\n",a[N-1]);
    }

    return getTimeStamp() - start;
}
/* OpenMP throughput Schoenauer triad: every thread streams the full
 * N-element triad into its own private target array, so no work is
 * shared. The parameter a is unused; per-thread buffers are used as
 * the destination instead. Returns the elapsed wall-clock time in
 * seconds (timed once by a single thread). */
double striad_tp(
        double * restrict a,
        const double * restrict b,
        const double * restrict c,
        const double * restrict d,
        int N,
        int iter
        )
{
    double S, E;

#pragma omp parallel
    {
        double* al = NULL;

        /* Fix: check the allocation; the return code was ignored. */
        if (posix_memalign((void**) &al, ARRAY_ALIGNMENT,
                           N * sizeof(double)) != 0) {
            printf("striad_tp: thread-local buffer allocation failed\n");
            exit(EXIT_FAILURE);
        }

#pragma omp single
        S = getTimeStamp();
        for(int j = 0; j < iter; j++) {
#pragma vector aligned
            for (int i=0; i<N; i++) {
                al[i] = b[i] + d[i] * c[i];
            }
            /* Fake dependency to keep the repeated sweeps alive. */
            if (al[N-1] > 2000) printf("Ai = %f\n",al[N-1]);
        }
#pragma omp single
        E = getTimeStamp();

        /* Fix: the per-thread buffer was leaked on every call. The
         * implicit barrier of the omp single above guarantees all
         * threads are done computing before the buffers go away. */
        free(al);
    }
    return E-S;
}
/* OpenMP worksharing Schoenauer triad: one thread team splits each
 * sweep of a[i] = b[i] + d[i] * c[i] via an omp for loop, repeated
 * iter times. Returns the elapsed wall-clock time in seconds. */
double striad_ws(
        double * restrict a,
        const double * restrict b,
        const double * restrict c,
        const double * restrict d,
        int N,
        int iter
        )
{
    const double start = getTimeStamp();

#pragma omp parallel
    {
        for (int rep = 0; rep < iter; rep++) {
#pragma omp for
#pragma vector aligned
            for (int idx = 0; idx < N; idx++) {
                a[idx] = b[idx] + d[idx] * c[idx];
            }
            /* Fake data dependency so the compiler cannot drop the
             * repeated sweeps. */
            if (a[N-1] > 2000) printf("Ai = %f\n",a[N-1]);
        }
    }

    return getTimeStamp() - start;
}
|
cpl_fft_body.h | /*
* This file is part of the ESO Common Pipeline Library
* Copyright (C) 2001-2017 European Southern Observatory
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#define CPL_FFTW_ADD(a) CPL_CONCAT2X(CPL_FFTW, a)
#define CPL_TYPE_ADD(a) CPL_CONCAT2X(a, CPL_TYPE)
#define CPL_TYPE_ADD_CONST(a) CPL_CONCAT3X(a, CPL_TYPE, const)
#define CPL_TYPE_ADD_COMPLEX(a) CPL_CONCAT2X(a, CPL_TYPE_C)
#define CPL_TYPE_ADD_COMPLEX_CONST(a) CPL_CONCAT3X(a, CPL_TYPE_C, const)
static cpl_error_code CPL_TYPE_ADD(cpl_fft_image)(cpl_image *,
const cpl_image *,
cpl_fft_mode ,
unsigned ,
CPL_FFTW_ADD(plan) *,
CPL_FFTW_TYPE **,
CPL_FFTW_TYPE **,
cpl_boolean)
#ifdef CPL_HAVE_ATTR_NONNULL
__attribute__((nonnull(6,7)))
#endif
;
/*----------------------------------------------------------------------------*/
/**
@internal
@brief Perform a FFT operation on an image of a specific type
@param self Pre-allocated output image of the given type
@param other Input image
@param mode CPL_FFT_FORWARD or CPL_FFT_BACKWARD, optionally CPL_FFT_NOSCALE
@param rigor FFTW_ESTIMATE, FFTW_MEASURE etc. Transform only w. _ESTIMATE
@param pplan NULL, or a pointer to keep the plan
@param pbufin A pointer to keep the input buffer
@param pbufout A pointer to keep the output buffer
@param is_last CPL_TRUE for the last call with the given pplan
@return CPL_ERROR_NONE or the corresponding #_cpl_error_code_
@see cpl_fft_image_()
@note The precision for both images must be either double or float.
When pplan is non-NULL, then the plan is destroyed when is_last is TRUE
*/
/*----------------------------------------------------------------------------*/
static cpl_error_code CPL_TYPE_ADD(cpl_fft_image)(cpl_image * self,
                                                  const cpl_image * other,
                                                  cpl_fft_mode mode,
                                                  unsigned rigor,
                                                  CPL_FFTW_ADD(plan) * pplan,
                                                  CPL_FFTW_TYPE ** pbufin,
                                                  CPL_FFTW_TYPE ** pbufout,
                                                  cpl_boolean is_last)
{
    const cpl_type typin  = cpl_image_get_type(other);
    const cpl_type typout = cpl_image_get_type(self);
    const int      nxin   = (int)cpl_image_get_size_x(other);
    const int      nyin   = (int)cpl_image_get_size_y(other);
    const int      nxout  = (int)cpl_image_get_size_x(self);
    /* Width of the half-plane holding the non-redundant part of a real
       transform: derived from the input width for forward, from the
       output width for backward. */
    const int      nxh    = ((mode & CPL_FFT_FORWARD) ? nxin : nxout) / 2 + 1;

    cpl_error_code error = CPL_ERROR_NONE;

    /* FIXME: This should be verified during configure and replaced by
       an assert() */
    cpl_ensure_code(sizeof(CPL_TYPE complex) == sizeof(CPL_FFTW_TYPE),
                    CPL_ERROR_UNSUPPORTED_MODE);

    if (mode & CPL_FFT_FORWARD) {
        CPL_FFTW_ADD(plan) pforw;
        CPL_FFTW_TYPE * out_b = (CPL_FFTW_TYPE*)
            CPL_TYPE_ADD_COMPLEX(cpl_image_get_data)(self);
        CPL_FFTW_TYPE * out_bt;

        cpl_ensure_code(out_b != NULL, CPL_ERROR_TYPE_MISMATCH);

        /* Make sure mode contains only the supported flags */
        cpl_ensure_code(!(mode & ~(CPL_FFT_FORWARD | CPL_FFT_NOSCALE)),
                        CPL_ERROR_ILLEGAL_INPUT);

        if (typin & CPL_TYPE_COMPLEX) {
            /* Complex-to-complex forward transform. */
            CPL_FFTW_TYPE * in_b;
            CPL_FFTW_TYPE * in_bt;
            size_t alignmask;

            CPL_DIAG_PRAGMA_PUSH_IGN(-Wcast-qual);
            in_b = (CPL_FFTW_TYPE *)
                CPL_TYPE_ADD_COMPLEX_CONST(cpl_image_get_data)(other);
            CPL_DIAG_PRAGMA_POP;

            if (pplan != NULL && *pplan != NULL) {
                /* Reuse the plan (and buffers) from a previous call. */
                pforw = *pplan;
            } else {
#ifdef _OPENMP
#pragma omp critical(cpl_fft_fftw)
#endif
                {
                    /* Allocate transformation buffers.
                       Each is used iff the pixel buffer is not aligned.
                       When the caller is cpl_fft_imagelist() a subsequent call
                       may need the transformation buffer, so we always create
                       it although it may never be written to.
                       For the same reason we use FFTW_PRESERVE_INPUT.
                    */
                    /* FIXME: In-place faster (or at least not slower) ? */
                    *pbufin = CPL_FFTW_ADD(malloc)(nxin * sizeof(CPL_FFTW_TYPE)
                                                   * nyin);
                    *pbufout = CPL_FFTW_ADD(malloc)(nxin * sizeof(CPL_FFTW_TYPE)
                                                    * nyin);

                    /* FIXME: If unaligned then drop FFTW_PRESERVE_INPUT */
                    pforw = CPL_FFTW_ADD(plan_dft_2d)(nyin, nxin, *pbufin,
                                                      *pbufout, FFTW_FORWARD,
                                                      rigor
                                                      | FFTW_PRESERVE_INPUT);
                }
                if (pplan != NULL) *pplan = pforw;
            }

            alignmask = (size_t)(*pbufin) | (size_t)(*pbufout);
            in_bt = cpl_fft_aligned((void*)in_b, *pbufin, alignmask);
            if (in_bt == *pbufin) {
                /* Input pixel buffer is insufficiently aligned for the
                   plan; stage it through the allocated buffer. */
                memcpy(in_bt, in_b, nxin * sizeof(CPL_FFTW_TYPE) * nyin);
            }

            out_bt = in_b == out_b ?
                *pbufout : cpl_fft_aligned((void*)out_b, *pbufout, alignmask);

            CPL_FFTW_ADD(execute_dft)(pforw, in_bt, out_bt);

            if (out_bt == *pbufout) {
                /* Result was staged; copy it into the output image. */
                memcpy(out_b, *pbufout, nxin * sizeof(CPL_FFTW_TYPE) * nyin);
            }
        } else {
            /* Real-to-complex forward transform. */
            const CPL_TYPE * in_b =
                CPL_TYPE_ADD_CONST(cpl_image_get_data)(other);
            CPL_TYPE * in_bt;
            size_t alignmask;

            /* For the real-to-complex transform, only the left half of
               the result is computed. The size of the output image may
               either match that, or the input buffer */

            if (pplan != NULL && *pplan != NULL) {
                pforw = *pplan;
            } else {
#ifdef _OPENMP
#pragma omp critical(cpl_fft_fftw)
#endif
                {
                    /* Allocate transformation buffers.
                       Each is used iff the pixel buffer is not aligned
                       - or in case of the output of the right size */
                    *pbufin = CPL_FFTW_ADD(malloc)(nxin * sizeof(CPL_TYPE)
                                                   * nyin);
                    *pbufout = CPL_FFTW_ADD(malloc)(nxh * sizeof(CPL_FFTW_TYPE)
                                                    * nyin);

                    /* FIXME: If unaligned then drop FFTW_PRESERVE_INPUT */
                    pforw = CPL_FFTW_ADD(plan_dft_r2c_2d)(nyin, nxin,
                                                          (CPL_TYPE*)*pbufin,
                                                          *pbufout, rigor
                                                          | FFTW_PRESERVE_INPUT);
                }
                if (pplan != NULL) *pplan = pforw;
            }

            alignmask = (size_t)(*pbufin) | (size_t)(*pbufout);
            CPL_DIAG_PRAGMA_PUSH_IGN(-Wcast-qual);
            in_bt = cpl_fft_aligned((void*)in_b, *pbufin, alignmask);
            CPL_DIAG_PRAGMA_POP;
            if (in_bt == (CPL_TYPE*)*pbufin) {
                memcpy(in_bt, in_b, nxin * sizeof(CPL_TYPE) * nyin);
            }

            out_bt = nxout == nxh ? cpl_fft_aligned((void*)out_b, *pbufout,
                                                    alignmask)
                : *pbufout;

            CPL_FFTW_ADD(execute_dft_r2c)(pforw, in_bt, out_bt);

            if (nxout != nxh) {
                /* Need to repack the transformed half */
                const CPL_FFTW_TYPE * out_bhj = *pbufout;
                CPL_FFTW_TYPE * out_bj = out_b;
                int j;

                /* Copy row by row: the staging buffer rows are nxh wide,
                   the output image rows are nxin wide. */
                for (j = 0; j < nyin; j++, out_bhj += nxh, out_bj += nxin) {
                    (void)memcpy(out_bj, out_bhj, nxh * sizeof(*out_bj));
                }
            } else if (out_bt == *pbufout) {
                /* For the real-to-complex transform, only the left half of
                   the transform is done. The output matches that,
                   but is not aligned. */
                (void)memcpy(out_b, out_bt, nxh * sizeof(CPL_FFTW_TYPE) * nyin);
            }
        }

        if (pplan == NULL || is_last) {
            /* The plan is not kept for reuse: account for its flops,
               then release the plan and both staging buffers. */
            double fl_add = 0.0, fl_mul = 0.0, fl_fma = 0.0;
#ifdef _OPENMP
#pragma omp critical(cpl_fft_fftw)
#endif
            {
                CPL_FFTW_ADD(flops)(pforw, &fl_add, &fl_mul, &fl_fma);
                CPL_FFTW_ADD(destroy_plan)(pforw);
                CPL_FFTW_ADD(free)(*pbufin);
                CPL_FFTW_ADD(free)(*pbufout);
            }
            cpl_tools_add_flops((cpl_flops)(fl_add + fl_mul + 2.0 * fl_fma));
        }
    } else if (mode & CPL_FFT_BACKWARD) {
        CPL_FFTW_ADD(plan) pback;
        const CPL_FFTW_TYPE * in_b = (const CPL_FFTW_TYPE *)
            CPL_TYPE_ADD_COMPLEX_CONST(cpl_image_get_data)(other);
        size_t alignmask;

        /* Make sure mode contains only the supported flags */
        cpl_ensure_code(!(mode & ~(CPL_FFT_BACKWARD | CPL_FFT_NOSCALE)),
                        CPL_ERROR_ILLEGAL_INPUT);

        cpl_ensure_code(typin & CPL_TYPE_COMPLEX, CPL_ERROR_TYPE_MISMATCH);

        if (typout & CPL_TYPE_COMPLEX) {
            /* Complex-to-complex backward transform. */
            CPL_FFTW_TYPE * out_b =
                CPL_TYPE_ADD_COMPLEX(cpl_image_get_data)(self);
            CPL_FFTW_TYPE * out_bt;
            CPL_FFTW_TYPE * in_bt;

            if (pplan != NULL && *pplan != NULL) {
                pback = *pplan;
            } else {
#ifdef _OPENMP
#pragma omp critical(cpl_fft_fftw)
#endif
                {
                    /* Allocate transformation buffers.
                       Each is used iff the pixel buffer is not aligned */
                    /* FIXME: In-place faster (or at least not slower) ? */
                    *pbufin = CPL_FFTW_ADD(malloc)(nxin * sizeof(CPL_FFTW_TYPE)
                                                   * nyin);
                    *pbufout = CPL_FFTW_ADD(malloc)(nxin * sizeof(CPL_FFTW_TYPE)
                                                    * nyin);

                    /* FIXME: If unaligned then drop FFTW_PRESERVE_INPUT */
                    pback = CPL_FFTW_ADD(plan_dft_2d)(nyin, nxin, *pbufin,
                                                      *pbufout, FFTW_BACKWARD,
                                                      FFTW_PRESERVE_INPUT
                                                      | rigor);
                }
                if (pplan != NULL) *pplan = pback;
            }

            alignmask = (size_t)(*pbufin) | (size_t)(*pbufout);
            CPL_DIAG_PRAGMA_PUSH_IGN(-Wcast-qual);
            in_bt = cpl_fft_aligned((void*)in_b, *pbufin, alignmask);
            CPL_DIAG_PRAGMA_POP;
            if (in_bt == *pbufin) {
                memcpy(in_bt, in_b, nxin * sizeof(CPL_FFTW_TYPE) * nyin);
            }

            out_bt = in_b == out_b ?
                *pbufout : cpl_fft_aligned((void*)out_b, *pbufout, alignmask);

            CPL_FFTW_ADD(execute_dft)(pback, in_bt, out_bt);

            if (out_bt == *pbufout) {
                memcpy(out_b, *pbufout, nxin * sizeof(CPL_FFTW_TYPE) * nyin);
            }
        } else {
            /* Complex-to-real backward transform. */
            CPL_TYPE * out_b = CPL_TYPE_ADD(cpl_image_get_data)(self);
            CPL_TYPE * out_bt;

            /* FFTW always modifies the input array in the C2R transform,
               so pbufin is always required here */

            if (pplan != NULL && *pplan != NULL) {
                pback = *pplan;
            } else {
#ifdef _OPENMP
#pragma omp critical(cpl_fft_fftw)
#endif
                {
                    /* Allocate transformation buffers. The output buffer is
                       used iff the pixel buffer is not aligned */
                    *pbufin = CPL_FFTW_ADD(malloc)(nxh * sizeof(CPL_FFTW_TYPE)
                                                   * nyin);
                    *pbufout = CPL_FFTW_ADD(malloc)(nxout * sizeof(CPL_TYPE)
                                                    * nyin);

                    /* From http://www.fftw.org/doc/Planner-Flags.html (3.3.4)
                       (2014-12-11): for multi-dimensional c2r transforms,
                       however, no input-preserving algorithms are implemented
                       and the planner will return NULL if one is requested.
                    */
                    pback = CPL_FFTW_ADD(plan_dft_c2r_2d)(nyin, nxout, *pbufin,
                                                          (CPL_TYPE*)*pbufout,
                                                          FFTW_DESTROY_INPUT
                                                          | rigor);
                }
                if (pplan != NULL) *pplan = pback;
            }

            if (nxin != nxh) {
                /* For the complex-to-real transform, only the left half of
                   the input is transformed. It needs to be repacked first */
                const CPL_FFTW_TYPE * in_bj = in_b;
                CPL_FFTW_TYPE * in_bhj = *pbufin;
                int j;

                for (j = 0; j < nyin; j++, in_bhj += nxh, in_bj += nxin) {
                    (void)memcpy(in_bhj, in_bj, nxh * sizeof(*in_bhj));
                }
            } else {
                /* For the complex-to-real transform, only the left half of
                   the input is transformed. The input matches that. */
                (void)memcpy(*pbufin, in_b, nxh * sizeof(CPL_FFTW_TYPE) * nyin);
            }

            alignmask = (size_t)(*pbufin) | (size_t)(*pbufout);
            out_bt = cpl_fft_aligned((void*)out_b, *pbufout, alignmask);

            CPL_FFTW_ADD(execute_dft_c2r)(pback, *pbufin, out_bt);

            if (out_bt == (CPL_TYPE*)*pbufout) {
                memcpy(out_b, *pbufout, nxout * sizeof(CPL_TYPE) * nyin);
            }
        }

        if (pplan == NULL || is_last) {
            /* Release the plan and staging buffers; account for flops. */
            double fl_add = 0.0, fl_mul = 0.0, fl_fma = 0.0;
#ifdef _OPENMP
#pragma omp critical(cpl_fft_fftw)
#endif
            {
                CPL_FFTW_ADD(flops)(pback, &fl_add, &fl_mul, &fl_fma);
                CPL_FFTW_ADD(destroy_plan)(pback);
                CPL_FFTW_ADD(free)(*pbufin);
                CPL_FFTW_ADD(free)(*pbufout);
            }
            cpl_tools_add_flops((cpl_flops)(fl_add + fl_mul + 2.0 * fl_fma));
        }

        if (!(mode & CPL_FFT_NOSCALE)) {
            /* Normalize the backward transform by the number of pixels. */
            error = cpl_image_divide_scalar(self, (double)(nxout * nyin));
        }
    } else {
        error = CPL_ERROR_ILLEGAL_INPUT;
    }

    return cpl_error_set_(error); /* Set or propagate error, if any */
}
#undef CPL_TYPE_ADD
#undef CPL_TYPE_ADD_CONST
#undef CPL_TYPE_ADD_COMPLEX
#undef CPL_TYPE_ADD_COMPLEX_CONST
|
compare.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO M M PPPP AAA RRRR EEEEE %
% C O O MM MM P P A A R R E %
% C O O M M M PPPP AAAAA RRRR EEE %
% C O O M M P A A R R E %
% CCCC OOO M M P A A R R EEEEE %
% %
% %
% MagickCore Image Comparison Methods %
% %
% Software Design %
% Cristy %
% December 2003 %
% %
% %
% Copyright @ 2003 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/compare.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/fourier.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/registry.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/transform.h"
#include "MagickCore/utility.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p a r e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompareImages() compares one or more pixel channels of an image to a
% reconstructed image and returns the difference image.
%
% The format of the CompareImages method is:
%
% Image *CompareImages(const Image *image,const Image *reconstruct_image,
% const MetricType metric,double *distortion,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o distortion: the computed distortion between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CompareImages(Image *image,const Image *reconstruct_image,
  const MetricType metric,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *highlight_view,
    *image_view,
    *reconstruct_view;

  const char
    *artifact;

  double
    fuzz;

  Image
    *clone_image,
    *difference_image,
    *highlight_image;

  MagickBooleanType
    status;

  PixelInfo
    highlight,
    lowlight,
    masklight;

  RectangleInfo
    geometry;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Validate arguments, then compute the requested distortion metric; the
    returned difference image visualizes which pixels differ.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  assert(distortion != (double *) NULL);
  *distortion=0.0;
  /*
    Fix: the debug-trace LogMagickEvent was previously emitted twice; it is
    now logged only once above.
  */
  status=GetImageDistortion(image,reconstruct_image,metric,distortion,
    exception);
  if (status == MagickFalse)
    return((Image *) NULL);
  /*
    Build the difference canvas over the union of both image geometries.
  */
  columns=MagickMax(image->columns,reconstruct_image->columns);
  rows=MagickMax(image->rows,reconstruct_image->rows);
  SetGeometry(image,&geometry);
  geometry.width=columns;
  geometry.height=rows;
  clone_image=CloneImage(image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageMask(clone_image,ReadPixelMask,(Image *) NULL,exception);
  difference_image=ExtentImage(clone_image,&geometry,exception);
  clone_image=DestroyImage(clone_image);
  if (difference_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageAlphaChannel(difference_image,OpaqueAlphaChannel,exception);
  highlight_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (highlight_image == (Image *) NULL)
    {
      difference_image=DestroyImage(difference_image);
      return((Image *) NULL);
    }
  status=SetImageStorageClass(highlight_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      difference_image=DestroyImage(difference_image);
      highlight_image=DestroyImage(highlight_image);
      return((Image *) NULL);
    }
  (void) SetImageMask(highlight_image,ReadPixelMask,(Image *) NULL,exception);
  (void) SetImageAlphaChannel(highlight_image,OpaqueAlphaChannel,exception);
  /*
    Highlight/lowlight/masklight colors: defaults below, overridable via
    "compare:*-color" image artifacts.
  */
  (void) QueryColorCompliance("#f1001ecc",AllCompliance,&highlight,exception);
  artifact=GetImageArtifact(image,"compare:highlight-color");
  if (artifact != (const char *) NULL)
    (void) QueryColorCompliance(artifact,AllCompliance,&highlight,exception);
  (void) QueryColorCompliance("#ffffffcc",AllCompliance,&lowlight,exception);
  artifact=GetImageArtifact(image,"compare:lowlight-color");
  if (artifact != (const char *) NULL)
    (void) QueryColorCompliance(artifact,AllCompliance,&lowlight,exception);
  (void) QueryColorCompliance("#888888cc",AllCompliance,&masklight,exception);
  artifact=GetImageArtifact(image,"compare:masklight-color");
  if (artifact != (const char *) NULL)
    (void) QueryColorCompliance(artifact,AllCompliance,&masklight,exception);
  /*
    Generate difference image.
  */
  status=MagickTrue;
  fuzz=GetFuzzyColorDistance(image,reconstruct_image);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  highlight_view=AcquireAuthenticCacheView(highlight_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,highlight_image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickBooleanType
      sync;

    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    Quantum
      *magick_restrict r;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    r=QueueCacheViewAuthenticPixels(highlight_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL) ||
        (r == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      MagickStatusType
        difference;

      ssize_t
        i;

      /*
        Pixels excluded by either read mask are painted with masklight.
      */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          SetPixelViaPixelInfo(highlight_image,&masklight,r);
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          r+=GetPixelChannels(highlight_image);
          continue;
        }
      difference=MagickFalse;
      /*
        Color channels are compared alpha-premultiplied; the alpha channel
        itself is compared directly.
      */
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance,
          pixel;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        if (channel == AlphaPixelChannel)
          pixel=(double) p[i]-GetPixelChannel(reconstruct_image,channel,q);
        else
          pixel=Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q);
        distance=pixel*pixel;
        if (distance >= fuzz)
          {
            difference=MagickTrue;
            break;
          }
      }
      if (difference == MagickFalse)
        SetPixelViaPixelInfo(highlight_image,&lowlight,r);
      else
        SetPixelViaPixelInfo(highlight_image,&highlight,r);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
      r+=GetPixelChannels(highlight_image);
    }
    sync=SyncCacheViewAuthenticPixels(highlight_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
  }
  highlight_view=DestroyCacheView(highlight_view);
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Composite the highlight overlay onto the extended canvas.
  */
  (void) CompositeImage(difference_image,highlight_image,image->compose,
    MagickTrue,0,0,exception);
  highlight_image=DestroyImage(highlight_image);
  if (status == MagickFalse)
    difference_image=DestroyImage(difference_image);
  return(difference_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e D i s t o r t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDistortion() compares one or more pixel channels of an image to a
% reconstructed image and returns the specified distortion metric.
%
% The format of the GetImageDistortion method is:
%
% MagickBooleanType GetImageDistortion(const Image *image,
% const Image *reconstruct_image,const MetricType metric,
% double *distortion,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o distortion: the computed distortion between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetAbsoluteDistortion() implements the AE (absolute error count) metric: it
  counts, per channel, how many pixels of `image' differ from
  `reconstruct_image' by at least the fuzz threshold, accumulating into
  distortion[] with the overall "any channel differs" count in
  distortion[CompositePixelChannel].  Returns MagickFalse if any row of
  pixels could not be read.
*/
static MagickBooleanType GetAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    fuzz;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Compute the absolute difference in pixels between two images.
  */
  status=MagickTrue;
  fuzz=GetFuzzyColorDistance(image,reconstruct_image);  /* squared threshold */
  /*
    Compare over the union of both geometries; out-of-bounds pixels come
    from the virtual cache views.
  */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];  /* per-row (per-thread) tally */

    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      j,
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      MagickBooleanType
        difference;

      ssize_t
        i;

      /*
        Skip pixels excluded by either image's read mask.
      */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      difference=MagickFalse;
      /*
        Color channels are compared alpha-premultiplied; the alpha channel
        itself is compared directly.
      */
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance,
          pixel;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        /* Only compare channels defined in both images and flagged for
           update in the reconstruction image. */
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        if (channel == AlphaPixelChannel)
          pixel=(double) p[i]-GetPixelChannel(reconstruct_image,channel,q);
        else
          pixel=Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q);
        distance=pixel*pixel;
        if (distance >= fuzz)
          {
            channel_distortion[i]++;
            difference=MagickTrue;
          }
      }
      if (difference != MagickFalse)
        channel_distortion[CompositePixelChannel]++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetAbsoluteDistortion)
#endif
    /* Merge this row's counts into the shared totals. */
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
  GetFuzzDistortion() implements the FUZZ metric: the root of the mean
  squared QuantumScale-normalized channel differences, averaged over the
  unmasked pixel area and, for the composite value, over the channel count.
  Results accumulate into distortion[]; distortion[CompositePixelChannel]
  holds the final per-image value.
*/
static MagickBooleanType GetFuzzDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    area;

  MagickBooleanType
    status;

  ssize_t
    j;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  /* Compare over the union of both geometries via virtual cache views. */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    size_t
      local_area = 0;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    /*
      Fix: the null check for `q' now uses (const Quantum *) NULL for
      const-correct consistency with the sibling metric functions.
    */
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      ssize_t
        i;

      /* Skip pixels excluded by either image's read mask. */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      /* Color channels are compared alpha-premultiplied; the alpha channel
         itself is compared directly. */
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        if (channel == AlphaPixelChannel)
          distance=QuantumScale*(p[i]-GetPixelChannel(reconstruct_image,
            channel,q));
        else
          distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(reconstruct_image,
            channel,q));
        channel_distortion[i]+=distance*distance;
        channel_distortion[CompositePixelChannel]+=distance*distance;
      }
      local_area++;  /* count unmasked pixels for the mean */
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetFuzzDistortion)
#endif
    {
      area+=local_area;
      for (j=0; j <= MaxPixelChannels; j++)
        distortion[j]+=channel_distortion[j];
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* Normalize: mean over area, then mean over channels, then root. */
  area=PerceptibleReciprocal(area);
  for (j=0; j <= MaxPixelChannels; j++)
    distortion[j]*=area;
  distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
  distortion[CompositePixelChannel]=sqrt(distortion[CompositePixelChannel]);
  return(status);
}
/*
  GetMeanAbsoluteDistortion() implements the MAE metric: the mean of the
  QuantumScale-normalized absolute channel differences over the unmasked
  pixel area.  Per-channel means go in distortion[]; the composite value is
  additionally divided by the channel count.
*/
static MagickBooleanType GetMeanAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    area;

  MagickBooleanType
    status;

  ssize_t
    j;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  /* Compare over the union of both geometries via virtual cache views. */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];  /* per-row (per-thread) sums */

    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    size_t
      local_area = 0;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      ssize_t
        i;

      /* Skip pixels excluded by either image's read mask. */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      /* Color channels are compared alpha-premultiplied; the alpha channel
         itself is compared directly. */
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        if (channel == AlphaPixelChannel)
          distance=QuantumScale*fabs((double) (p[i]-(double)
            GetPixelChannel(reconstruct_image,channel,q)));
        else
          distance=QuantumScale*fabs((double) (Sa*p[i]-Da*
            GetPixelChannel(reconstruct_image,channel,q)));
        channel_distortion[i]+=distance;
        channel_distortion[CompositePixelChannel]+=distance;
      }
      local_area++;  /* count unmasked pixels for the mean */
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetMeanAbsoluteError)
#endif
    {
      area+=local_area;
      for (j=0; j <= MaxPixelChannels; j++)
        distortion[j]+=channel_distortion[j];
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* Normalize: mean over area, composite additionally over channels. */
  area=PerceptibleReciprocal(area);
  for (j=0; j <= MaxPixelChannels; j++)
    distortion[j]*=area;
  distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
  return(status);
}
/*
  GetMeanErrorPerPixel() implements the MEPP metric.  It accumulates raw
  (unnormalized) absolute channel differences into distortion[] and writes
  the summary statistics into image->error (mean error per pixel, normalized
  mean error, normalized maximum error).  Note `image' is non-const because
  image->error is mutated; the loop is intentionally serial.
*/
static MagickBooleanType GetMeanErrorPerPixel(Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  double
    area,
    maximum_error,
    mean_error;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  area=0.0;
  maximum_error=0.0;
  mean_error=0.0;
  /* Compare over the union of both geometries via virtual cache views. */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      ssize_t
        i;

      /* Skip pixels excluded by either image's read mask. */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      /* Color channels are compared alpha-premultiplied; the alpha channel
         itself is compared directly. */
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        if (channel == AlphaPixelChannel)
          distance=fabs((double) (p[i]-(double)
            GetPixelChannel(reconstruct_image,channel,q)));
        else
          distance=fabs((double) (Sa*p[i]-Da*
            GetPixelChannel(reconstruct_image,channel,q)));
        distortion[i]+=distance;
        distortion[CompositePixelChannel]+=distance;
        mean_error+=distance*distance;
        if (distance > maximum_error)
          maximum_error=distance;
        /* NOTE: area counts channel samples (incremented per channel, not
           per pixel), so the means below are per channel sample. */
        area++;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* Fold the accumulated sums into the image error statistics. */
  area=PerceptibleReciprocal(area);
  image->error.mean_error_per_pixel=area*distortion[CompositePixelChannel];
  image->error.normalized_mean_error=area*QuantumScale*QuantumScale*mean_error;
  image->error.normalized_maximum_error=QuantumScale*maximum_error;
  return(status);
}
/*
  GetMeanSquaredDistortion() implements the MSE metric: the mean of squared
  QuantumScale-normalized channel differences over the unmasked pixel area.
  Per-channel means go in distortion[]; the composite value is additionally
  divided by the channel count.  Also the basis for PSNR and RMSE.
*/
static MagickBooleanType GetMeanSquaredDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    area;

  MagickBooleanType
    status;

  ssize_t
    j;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  /* Compare over the union of both geometries via virtual cache views. */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    double
      channel_distortion[MaxPixelChannels+1];

    size_t
      local_area = 0;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      ssize_t
        i;

      /* Skip pixels excluded by either image's read mask. */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      /* Color channels are compared alpha-premultiplied; the alpha channel
         itself is compared directly. */
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        if (channel == AlphaPixelChannel)
          distance=QuantumScale*(p[i]-GetPixelChannel(reconstruct_image,
            channel,q));
        else
          distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(reconstruct_image,
            channel,q));
        channel_distortion[i]+=distance*distance;
        channel_distortion[CompositePixelChannel]+=distance*distance;
      }
      local_area++;  /* count unmasked pixels for the mean */
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetMeanSquaredError)
#endif
    {
      area+=local_area;
      for (j=0; j <= MaxPixelChannels; j++)
        distortion[j]+=channel_distortion[j];
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* Normalize: mean over area, composite additionally over channels.
     Fix: explicit (double) cast on the divisor for consistency with the
     sibling metric functions. */
  area=PerceptibleReciprocal(area);
  for (j=0; j <= MaxPixelChannels; j++)
    distortion[j]*=area;
  distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
  return(status);
}
/*
  GetNormalizedCrossCorrelationDistortion() implements the NCC metric:
  per-channel covariance of the two images (mean-subtracted, computed over
  the unmasked pixel area) divided by the product of the channels' standard
  deviations.  The composite value is the root-mean-square of the
  per-channel correlations.
*/
static MagickBooleanType GetNormalizedCrossCorrelationDistortion(
  const Image *image,const Image *reconstruct_image,double *distortion,
  ExceptionInfo *exception)
{
#define SimilarityImageTag  "Similarity/Image"

  CacheView
    *image_view,
    *reconstruct_view;

  ChannelStatistics
    *image_statistics,
    *reconstruct_statistics;

  double
    area;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    channels,
    i;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Normalize to account for variation due to lighting and exposure condition.
  */
  image_statistics=GetImageStatistics(image,exception);
  reconstruct_statistics=GetImageStatistics(reconstruct_image,exception);
  if ((image_statistics == (ChannelStatistics *) NULL) ||
      (reconstruct_statistics == (ChannelStatistics *) NULL))
    {
      /* Release whichever of the two was successfully acquired. */
      if (image_statistics != (ChannelStatistics *) NULL)
        image_statistics=(ChannelStatistics *) RelinquishMagickMemory(
          image_statistics);
      if (reconstruct_statistics != (ChannelStatistics *) NULL)
        reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory(
          reconstruct_statistics);
      return(MagickFalse);
    }
  status=MagickTrue;
  progress=0;
  for (i=0; i <= MaxPixelChannels; i++)
    distortion[i]=0.0;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  /*
    First pass: count the pixels that are unmasked in both images so the
    covariance below can be averaged over that area.
  */
  for (y=0; y < (ssize_t) rows; y++)
  {
    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      area++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
  }
  area=PerceptibleReciprocal(area);
  /*
    Second pass: accumulate the mean-subtracted cross products per channel.
  */
  for (y=0; y < (ssize_t) rows; y++)
  {
    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      /* Color channels are compared alpha-premultiplied; the alpha channel
         itself is compared directly. */
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        if (channel == AlphaPixelChannel)
          distortion[i]+=area*QuantumScale*((double) p[i]-
            image_statistics[channel].mean)*(GetPixelChannel(reconstruct_image,
            channel,q)-reconstruct_statistics[channel].mean);
        else
          distortion[i]+=area*QuantumScale*(Sa*p[i]-
            image_statistics[channel].mean)*(Da*GetPixelChannel(
            reconstruct_image,channel,q)-reconstruct_statistics[channel].mean);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* NOTE(review): this loop is serial here, so the atomic appears
           redundant — presumably retained for symmetry with parallel
           variants; confirm before removing. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SimilarityImageTag,progress,rows);
        if (proceed == MagickFalse)
          {
            status=MagickFalse;
            break;
          }
      }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Divide by the standard deviation.
  */
  channels=0;
  distortion[CompositePixelChannel]=0.0;
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    double
      gamma;

    PixelChannel channel = GetPixelChannelChannel(image,i);
    gamma=image_statistics[channel].standard_deviation*
      reconstruct_statistics[channel].standard_deviation;
    if (fabs(gamma) >= MagickEpsilon)
      {
        gamma=PerceptibleReciprocal(gamma);
        distortion[i]=QuantumRange*gamma*distortion[i];
        distortion[CompositePixelChannel]+=distortion[i]*distortion[i];
        channels++;
      }
  }
  /* NOTE(review): if no channel had non-zero standard deviations, channels
     stays 0 and this divides by zero — confirm whether callers guarantee
     at least one varying channel. */
  distortion[CompositePixelChannel]=sqrt(distortion[CompositePixelChannel]/
    channels);
  /*
    Free resources.
  */
  reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    reconstruct_statistics);
  image_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    image_statistics);
  return(status);
}
/*
  GetPeakAbsoluteDistortion() implements the PAE metric: the maximum
  QuantumScale-normalized absolute channel difference observed over all
  unmasked pixels, per channel, with the overall maximum in
  distortion[CompositePixelChannel].
*/
static MagickBooleanType GetPeakAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  /* Compare over the union of both geometries via virtual cache views. */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];  /* per-row (per-thread) maxima */

    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      j,
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      ssize_t
        i;

      /* Skip pixels excluded by either image's read mask. */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      /* Color channels are compared alpha-premultiplied; the alpha channel
         itself is compared directly. */
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        if (channel == AlphaPixelChannel)
          distance=QuantumScale*fabs((double) (p[i]-(double)
            GetPixelChannel(reconstruct_image,channel,q)));
        else
          distance=QuantumScale*fabs((double) (Sa*p[i]-Da*
            GetPixelChannel(reconstruct_image,channel,q)));
        if (distance > channel_distortion[i])
          channel_distortion[i]=distance;
        if (distance > channel_distortion[CompositePixelChannel])
          channel_distortion[CompositePixelChannel]=distance;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetPeakAbsoluteError)
#endif
    /* Fold this row's maxima into the shared result. */
    for (j=0; j <= MaxPixelChannels; j++)
      if (channel_distortion[j] > distortion[j])
        distortion[j]=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
static inline double MagickLog10(const double x)
{
#define Log10Epsilon  (1.0e-11)

  /*
    Safe base-10 logarithm: operates on |x|, clamping magnitudes below
    Log10Epsilon so log10() is never evaluated at (or too near) zero.
  */
  double
    magnitude;

  magnitude=fabs(x);
  if (magnitude < Log10Epsilon)
    magnitude=Log10Epsilon;
  return(log10(magnitude));
}
static MagickBooleanType GetPeakSignalToNoiseRatio(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  /*
    Convert each channel's mean-squared error into PSNR (decibels): a
    below-epsilon error maps to INFINITY, otherwise
    10*log10(max^2)-10*log10(MSE) with a normalized signal maximum of 1.0.
  */
  MagickBooleanType
    status;

  ssize_t
    i;

  status=GetMeanSquaredDistortion(image,reconstruct_image,distortion,
    exception);
  for (i=0; i <= MaxPixelChannels; i++)
  {
    double
      mean_squared;

    mean_squared=distortion[i];
    if (fabs(mean_squared) < MagickEpsilon)
      {
        distortion[i]=INFINITY;
        continue;
      }
    distortion[i]=10.0*(MagickLog10(1.0)-MagickLog10(mean_squared));
  }
  return(status);
}
/*
  GetPerceptualHashDistortion() implements the PHASH metric: for each pixel
  channel it sums the squared differences of the image-moment perceptual
  hashes of the two images across all colorspaces and moments.  The
  "phash:normalize" image artifact switches to a normalized form.
*/
static MagickBooleanType GetPerceptualHashDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  ChannelPerceptualHash
    *channel_phash,
    *reconstruct_phash;

  const char
    *artifact;

  MagickBooleanType
    normalize;

  ssize_t
    channel;

  /*
    Compute perceptual hash in the sRGB colorspace.
  */
  channel_phash=GetImagePerceptualHash(image,exception);
  if (channel_phash == (ChannelPerceptualHash *) NULL)
    return(MagickFalse);
  reconstruct_phash=GetImagePerceptualHash(reconstruct_image,exception);
  if (reconstruct_phash == (ChannelPerceptualHash *) NULL)
    {
      channel_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(
        channel_phash);
      return(MagickFalse);
    }
  /* Normalization is opt-in via the "phash:normalize" artifact. */
  artifact=GetImageArtifact(image,"phash:normalize");
  normalize=(artifact == (const char *) NULL) ||
    (IsStringTrue(artifact) == MagickFalse) ? MagickFalse : MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static)
#endif
  for (channel=0; channel < MaxPixelChannels; channel++)
  {
    double
      difference;

    ssize_t
      i;

    difference=0.0;
    for (i=0; i < MaximumNumberOfImageMoments; i++)
    {
      double
        alpha,
        beta;

      ssize_t
        j;

      for (j=0; j < (ssize_t) channel_phash[0].number_colorspaces; j++)
      {
        alpha=channel_phash[channel].phash[j][i];
        beta=reconstruct_phash[channel].phash[j][i];
        if (normalize == MagickFalse)
          difference+=(beta-alpha)*(beta-alpha);
        else
          /* NOTE(review): the normalized branch assigns (=) rather than
             accumulates (+=), so only the last moment/colorspace pair
             survives — confirm this matches the intended normalization. */
          difference=sqrt((beta-alpha)*(beta-alpha)/
            channel_phash[0].number_channels);
      }
    }
    /* Each iteration writes a distinct distortion[channel]; only the shared
       composite sum below needs the critical section. */
    distortion[channel]+=difference;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetPerceptualHashDistortion)
#endif
    distortion[CompositePixelChannel]+=difference;
  }
  /*
    Free resources.
  */
  reconstruct_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(
    reconstruct_phash);
  channel_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(channel_phash);
  return(MagickTrue);
}
static MagickBooleanType GetRootMeanSquaredDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  /*
    RMSE is simply the square root of the per-channel mean-squared
    distortion, including the composite channel.
  */
  MagickBooleanType
    status;

  ssize_t
    channel;

  status=GetMeanSquaredDistortion(image,reconstruct_image,distortion,
    exception);
  for (channel=MaxPixelChannels; channel >= 0; channel--)
    distortion[channel]=sqrt(distortion[channel]);
  return(status);
}
static MagickBooleanType GetStructuralSimilarityDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
#define SSIMRadius 5.0
#define SSIMSigma 1.5
#define SSIMBlocksize 8
#define SSIMK1 0.01
#define SSIMK2 0.03
#define SSIML 1.0

  CacheView
    *image_view,
    *reconstruct_view;

  char
    geometry[MagickPathExtent];

  const char
    *artifact;

  double
    area,
    c1,
    c2,
    radius,
    sigma;

  KernelInfo
    *kernel_info;

  MagickBooleanType
    status;

  ssize_t
    j;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Compute the structural similarity index (SSIM) between image and
    reconstruct_image @ https://en.wikipedia.org/wiki/Structural_similarity.
    Local statistics are gathered under a Gaussian window whose radius and
    sigma may be overridden with the "compare:ssim-radius" and
    "compare:ssim-sigma" artifacts; the stabilizing constants c1 and c2 may
    be overridden with "compare:ssim-k1" and "compare:ssim-k2".  Returns the
    per-channel mean SSIM in distortion[] and the channel-averaged SSIM in
    distortion[CompositePixelChannel].
  */
  radius=SSIMRadius;
  artifact=GetImageArtifact(image,"compare:ssim-radius");
  if (artifact != (const char *) NULL)
    radius=StringToDouble(artifact,(char **) NULL);
  sigma=SSIMSigma;
  artifact=GetImageArtifact(image,"compare:ssim-sigma");
  if (artifact != (const char *) NULL)
    sigma=StringToDouble(artifact,(char **) NULL);
  (void) FormatLocaleString(geometry,MagickPathExtent,"gaussian:%.20gx%.20g",
    radius,sigma);
  kernel_info=AcquireKernelInfo(geometry,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  c1=pow(SSIMK1*SSIML,2.0);
  artifact=GetImageArtifact(image,"compare:ssim-k1");
  if (artifact != (const char *) NULL)
    c1=pow(StringToDouble(artifact,(char **) NULL)*SSIML,2.0);
  c2=pow(SSIMK2*SSIML,2.0);
  artifact=GetImageArtifact(image,"compare:ssim-k2");
  if (artifact != (const char *) NULL)
    c2=pow(StringToDouble(artifact,(char **) NULL)*SSIML,2.0);
  status=MagickTrue;
  area=0.0;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,reconstruct_image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    size_t
      local_area = 0;

    ssize_t
      i,
      x;

    if (status == MagickFalse)
      continue;
    /*
      Acquire a pixel window wide and tall enough for the Gaussian kernel
      centered on row y; the virtual cache views satisfy negative offsets.
    */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) kernel_info->width/2L),y-
      ((ssize_t) kernel_info->height/2L),columns+kernel_info->width,
      kernel_info->height,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,-((ssize_t) kernel_info->width/
      2L),y-((ssize_t) kernel_info->height/2L),columns+kernel_info->width,
      kernel_info->height,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        x_pixel_mu[MaxPixelChannels+1],
        x_pixel_sigma_squared[MaxPixelChannels+1],
        xy_sigma[MaxPixelChannels+1],
        y_pixel_mu[MaxPixelChannels+1],
        y_pixel_sigma_squared[MaxPixelChannels+1];

      const Quantum
        *magick_restrict reference,
        *magick_restrict target;

      MagickRealType
        *k;

      ssize_t
        v;

      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      /*
        Reset the per-window moment accumulators.  (A redundant memset of
        x_pixel_sigma_squared with sizeof(y_pixel_sigma_squared) — a
        copy-paste artifact — was removed here.)
      */
      (void) memset(x_pixel_mu,0,sizeof(x_pixel_mu));
      (void) memset(x_pixel_sigma_squared,0,sizeof(x_pixel_sigma_squared));
      (void) memset(xy_sigma,0,sizeof(xy_sigma));
      (void) memset(y_pixel_mu,0,sizeof(y_pixel_mu));
      (void) memset(y_pixel_sigma_squared,0,sizeof(y_pixel_sigma_squared));
      k=kernel_info->values;
      reference=p;
      target=q;
      for (v=0; v < (ssize_t) kernel_info->height; v++)
      {
        ssize_t
          u;

        for (u=0; u < (ssize_t) kernel_info->width; u++)
        {
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            double
              x_pixel,
              y_pixel;

            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            PixelTrait reconstruct_traits = GetPixelChannelTraits(
              reconstruct_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (reconstruct_traits == UndefinedPixelTrait) ||
                ((reconstruct_traits & UpdatePixelTrait) == 0))
              continue;
            /*
              Accumulate kernel-weighted first and second moments.
            */
            x_pixel=QuantumScale*reference[i];
            x_pixel_mu[i]+=(*k)*x_pixel;
            x_pixel_sigma_squared[i]+=(*k)*x_pixel*x_pixel;
            y_pixel=QuantumScale*
              GetPixelChannel(reconstruct_image,channel,target);
            y_pixel_mu[i]+=(*k)*y_pixel;
            y_pixel_sigma_squared[i]+=(*k)*y_pixel*y_pixel;
            xy_sigma[i]+=(*k)*x_pixel*y_pixel;
          }
          k++;
          reference+=GetPixelChannels(image);
          target+=GetPixelChannels(reconstruct_image);
        }
        reference+=GetPixelChannels(image)*columns;
        target+=GetPixelChannels(reconstruct_image)*columns;
      }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          ssim,
          x_pixel_mu_squared,
          x_pixel_sigmas_squared,
          xy_mu,
          xy_sigmas,
          y_pixel_mu_squared,
          y_pixel_sigmas_squared;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(
          reconstruct_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /*
          SSIM(x,y)=((2*mu_x*mu_y+c1)*(2*sigma_xy+c2))/
            ((mu_x^2+mu_y^2+c1)*(sigma_x^2+sigma_y^2+c2)).
        */
        x_pixel_mu_squared=x_pixel_mu[i]*x_pixel_mu[i];
        y_pixel_mu_squared=y_pixel_mu[i]*y_pixel_mu[i];
        xy_mu=x_pixel_mu[i]*y_pixel_mu[i];
        xy_sigmas=xy_sigma[i]-xy_mu;
        x_pixel_sigmas_squared=x_pixel_sigma_squared[i]-x_pixel_mu_squared;
        y_pixel_sigmas_squared=y_pixel_sigma_squared[i]-y_pixel_mu_squared;
        ssim=((2.0*xy_mu+c1)*(2.0*xy_sigmas+c2))/
          ((x_pixel_mu_squared+y_pixel_mu_squared+c1)*
           (x_pixel_sigmas_squared+y_pixel_sigmas_squared+c2));
        channel_distortion[i]+=ssim;
        channel_distortion[CompositePixelChannel]+=ssim;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
      local_area++;
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetStructuralSimilarityDistortion)
#endif
    {
      /*
        Merge this row's partial sums under the critical section.
      */
      area+=local_area;
      for (i=0; i <= MaxPixelChannels; i++)
        distortion[i]+=channel_distortion[i];
    }
  }
  image_view=DestroyCacheView(image_view);
  reconstruct_view=DestroyCacheView(reconstruct_view);
  /*
    Average accumulated SSIM over the number of windows processed.
  */
  for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
  {
    PixelChannel channel = GetPixelChannelChannel(image,j);
    PixelTrait traits = GetPixelChannelTraits(image,channel);
    if ((traits == UndefinedPixelTrait) || ((traits & UpdatePixelTrait) == 0))
      continue;
    distortion[j]/=area;
  }
  distortion[CompositePixelChannel]/=area;
  distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(status);
}
static MagickBooleanType GetStructuralDisimilarityDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  ssize_t
    channel;

  /*
    Structural dissimilarity is derived from SSIM: DSSIM = (1-SSIM)/2.
  */
  status=GetStructuralSimilarityDistortion(image,reconstruct_image,distortion,
    exception);
  for (channel=0; channel <= MaxPixelChannels; channel++)
    distortion[channel]=0.5*(1.0-distortion[channel]);
  return(status);
}
MagickExport MagickBooleanType GetImageDistortion(Image *image,
  const Image *reconstruct_image,const MetricType metric,double *distortion,
  ExceptionInfo *exception)
{
  double
    *channel_distortion;

  MagickBooleanType
    status;

  size_t
    length;

  /*
    Compare the pixel channels of image and reconstruct_image and return the
    requested distortion metric.  The composite-channel value is written to
    *distortion and recorded in the image's "distortion" property.  An
    unrecognized metric falls through to normalized cross correlation.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  assert(distortion != (double *) NULL);
  *distortion=0.0;
  /*
    Get image distortion.  (A duplicated debug trace of image->filename was
    removed; the event is already logged above.)
  */
  length=MaxPixelChannels+1UL;
  channel_distortion=(double *) AcquireQuantumMemory(length,
    sizeof(*channel_distortion));
  if (channel_distortion == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(channel_distortion,0,length*
    sizeof(*channel_distortion));
  switch (metric)
  {
    case AbsoluteErrorMetric:
    {
      status=GetAbsoluteDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case FuzzErrorMetric:
    {
      status=GetFuzzDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanAbsoluteErrorMetric:
    {
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case MeanErrorPerPixelErrorMetric:
    {
      status=GetMeanErrorPerPixel(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanSquaredErrorMetric:
    {
      status=GetMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case NormalizedCrossCorrelationErrorMetric:
    default:
    {
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakAbsoluteErrorMetric:
    {
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakSignalToNoiseRatioErrorMetric:
    {
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PerceptualHashErrorMetric:
    {
      status=GetPerceptualHashDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case RootMeanSquaredErrorMetric:
    {
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case StructuralSimilarityErrorMetric:
    {
      status=GetStructuralSimilarityDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case StructuralDissimilarityErrorMetric:
    {
      status=GetStructuralDisimilarityDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
  }
  *distortion=channel_distortion[CompositePixelChannel];
  channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
  (void) FormatImageProperty(image,"distortion","%.*g",GetMagickPrecision(),
    *distortion);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e D i s t o r t i o n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDistortions() compares the pixel channels of an image to a
% reconstructed image and returns the specified distortion metric for each
% channel.
%
% The format of the GetImageDistortions method is:
%
% double *GetImageDistortions(const Image *image,
% const Image *reconstruct_image,const MetricType metric,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport double *GetImageDistortions(Image *image,
  const Image *reconstruct_image,const MetricType metric,
  ExceptionInfo *exception)
{
  double
    *channel_distortion;

  MagickBooleanType
    status;

  size_t
    length;

  /*
    Compare the pixel channels of image and reconstruct_image and return the
    requested distortion metric for each channel.  The caller owns the
    returned array (MaxPixelChannels+1 doubles); NULL is returned on failure.
    An unrecognized metric falls through to normalized cross correlation.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  /*
    Get image distortion.  (A duplicated debug trace of image->filename was
    removed; the event is already logged above.)
  */
  length=MaxPixelChannels+1UL;
  channel_distortion=(double *) AcquireQuantumMemory(length,
    sizeof(*channel_distortion));
  if (channel_distortion == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(channel_distortion,0,length*
    sizeof(*channel_distortion));
  status=MagickTrue;
  switch (metric)
  {
    case AbsoluteErrorMetric:
    {
      status=GetAbsoluteDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case FuzzErrorMetric:
    {
      status=GetFuzzDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanAbsoluteErrorMetric:
    {
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case MeanErrorPerPixelErrorMetric:
    {
      status=GetMeanErrorPerPixel(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanSquaredErrorMetric:
    {
      status=GetMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case NormalizedCrossCorrelationErrorMetric:
    default:
    {
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakAbsoluteErrorMetric:
    {
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakSignalToNoiseRatioErrorMetric:
    {
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PerceptualHashErrorMetric:
    {
      /*
        Fixed: this case previously called GetRootMeanSquaredDistortion,
        silently returning RMSE channel values for the perceptual-hash
        metric (GetImageDistortion dispatches this metric correctly).
      */
      status=GetPerceptualHashDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case RootMeanSquaredErrorMetric:
    {
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case StructuralSimilarityErrorMetric:
    {
      status=GetStructuralSimilarityDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case StructuralDissimilarityErrorMetric:
    {
      status=GetStructuralDisimilarityDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
  }
  if (status == MagickFalse)
    {
      channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
      return((double *) NULL);
    }
  return(channel_distortion);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e s E q u a l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImagesEqual() compares the pixels of two images and returns immediately
% if any pixel is not identical.
%
% The format of the IsImagesEqual method is:
%
% MagickBooleanType IsImagesEqual(const Image *image,
% const Image *reconstruct_image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  IsImagesEqual() walks both images in lockstep and returns MagickTrue only
  when every updatable channel of every pixel matches to within
  MagickEpsilon; it stops at the first mismatch (or pixel-cache fault).
*/
MagickExport MagickBooleanType IsImagesEqual(const Image *image,
  const Image *reconstruct_image,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  size_t
    columns,
    rows;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  /*
    Iterate over the union of the two geometries; virtual views supply
    pixels where one image is smaller than the other.
  */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      break;  /* cache fault: bail out; y < rows below yields MagickFalse */
    for (x=0; x < (ssize_t) columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        /*
          Only channels defined in both images and updatable in the
          reconstruction participate in the comparison.
        */
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        distance=fabs((double) (p[i]-(double) GetPixelChannel(reconstruct_image,
          channel,q)));
        if (distance >= MagickEpsilon)
          break;  /* channel mismatch */
      }
      if (i < (ssize_t) GetPixelChannels(image))
        break;  /* a channel mismatched: abandon this row */
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
    if (x < (ssize_t) columns)
      break;  /* propagate the mismatch out of the row loop */
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    y reaches rows only when no break (mismatch or fault) occurred.
  */
  return(y < (ssize_t) rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C o l o r M e t r i c %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColorMetric() measures the difference between colors at each pixel
% location of two images. A value other than 0 means the colors match
% exactly. Otherwise an error measure is computed by summing over all
% pixels in an image the distance squared in RGB space between each image
% pixel and its corresponding pixel in the reconstruct image. The error
% measure is assigned to these image members:
%
% o mean_error_per_pixel: The mean error for any single pixel in
% the image.
%
% o normalized_mean_error: The normalized mean quantization error for
% any single pixel in the image. This distance measure is normalized to
% a range between 0 and 1. It is independent of the range of red, green,
% and blue values in the image.
%
% o normalized_maximum_error: The normalized maximum quantization
% error for any single pixel in the image. This distance measure is
% normalized to a range between 0 and 1. It is independent of the range
% of red, green, and blue values in your image.
%
% A small normalized mean square error, accessed as
% image->normalized_mean_error, suggests the images are very similar in
% spatial layout and color.
%
% The format of the SetImageColorMetric method is:
%
% MagickBooleanType SetImageColorMetric(Image *image,
% const Image *reconstruct_image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageColorMetric(Image *image,
  const Image *reconstruct_image,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    area,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Measure the per-channel difference between the two images and store the
    mean, normalized-mean, and normalized-maximum error in image->error.
    Returns MagickTrue only when the images are identical (zero mean error).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  area=0.0;
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      break;
    for (x=0; x < (ssize_t) columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        distance=fabs((double) (p[i]-(double) GetPixelChannel(reconstruct_image,
          channel,q)));
        if (distance >= MagickEpsilon)
          {
            mean_error_per_pixel+=distance;
            mean_error+=distance*distance;
            if (distance > maximum_error)
              maximum_error=distance;
          }
        area++;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Guard against division by zero when no channels were compared (e.g. a
    zero-size image or no updatable channels): PerceptibleReciprocal()
    returns a safe reciprocal rather than producing NaN/Inf.
  */
  area=PerceptibleReciprocal(area);
  image->error.mean_error_per_pixel=(double) (mean_error_per_pixel*area);
  image->error.normalized_mean_error=(double) (QuantumScale*QuantumScale*
    mean_error*area);
  image->error.normalized_maximum_error=(double) (QuantumScale*maximum_error);
  status=image->error.mean_error_per_pixel == 0.0 ? MagickTrue : MagickFalse;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S i m i l a r i t y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SimilarityImage() compares the reference image of the image and returns the
% best match offset. In addition, it returns a similarity image such that an
% exact match location is completely white and if none of the pixels match,
% black, otherwise some gray level in-between.
%
% The format of the SimilarityImage method is:
%
% Image *SimilarityImage(const Image *image,const Image *reference,
% const MetricType metric,const double similarity_threshold,
% RectangleInfo *offset,double *similarity,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reference: find an area of the image that closely resembles this image.
%
% o metric: the metric.
%
% o similarity_threshold: minimum distortion for (sub)image match.
%
% o offset: the best match offset of the reference image within the image.
%
% o similarity: the computed similarity between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(MAGICKCORE_HDRI_SUPPORT) && defined(MAGICKCORE_FFTW_DELEGATE)
static Image *CrossCorrelationImage(const Image *alpha_image,
  const Image *beta_image,ExceptionInfo *exception)
{
  Image
    *clone_image,
    *complex_conjugate,
    *complex_multiplication,
    *cross_correlation,
    *fft_images;

  /*
    Cross correlate two images in the frequency domain:
    IFT(FFT(alpha) x conj(FFT(beta))).  Returns NULL on failure.
  */
  /*
    Take the FFT of beta image.
  */
  clone_image=CloneImage(beta_image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return(clone_image);
  (void) SetImageArtifact(clone_image,"fourier:normalize","inverse");
  fft_images=ForwardFourierTransformImage(clone_image,MagickFalse,
    exception);
  clone_image=DestroyImageList(clone_image);
  if (fft_images == (Image *) NULL)
    return(fft_images);
  /*
    Take the complex conjugate of beta image.
  */
  complex_conjugate=ComplexImages(fft_images,ConjugateComplexOperator,
    exception);
  fft_images=DestroyImageList(fft_images);
  if (complex_conjugate == (Image *) NULL)
    return(complex_conjugate);
  /*
    Take the FFT of the alpha image.
  */
  clone_image=CloneImage(alpha_image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    {
      complex_conjugate=DestroyImageList(complex_conjugate);
      return(clone_image);
    }
  (void) SetImageArtifact(clone_image,"fourier:normalize","inverse");
  fft_images=ForwardFourierTransformImage(clone_image,MagickFalse,exception);
  clone_image=DestroyImageList(clone_image);
  if (fft_images == (Image *) NULL)
    {
      complex_conjugate=DestroyImageList(complex_conjugate);
      return(fft_images);
    }
  /*
    Splice the alpha FFT pair onto the conjugate list so ComplexImages()
    sees all four component images.
  */
  complex_conjugate->next->next=fft_images;
  /*
    Do complex multiplication.
  */
  (void) SetImageArtifact(complex_conjugate,"compose:clamp","false");
  complex_multiplication=ComplexImages(complex_conjugate,
    MultiplyComplexOperator,exception);
  complex_conjugate=DestroyImageList(complex_conjugate);
  /*
    Fixed: previously this tested fft_images, a pointer already destroyed as
    part of the complex_conjugate list (dangling), instead of the actual
    multiplication result.
  */
  if (complex_multiplication == (Image *) NULL)
    return(complex_multiplication);
  /*
    Do the IFT and return the cross-correlation result.
  */
  cross_correlation=InverseFourierTransformImage(complex_multiplication,
    complex_multiplication->next,MagickFalse,exception);
  complex_multiplication=DestroyImageList(complex_multiplication);
  return(cross_correlation);
}
/*
  NCCDivideImage() returns a clone of alpha_image whose updatable channels
  are divided, pixel by pixel, by the corresponding beta_image channels
  (values of beta below MagickEpsilon leave the pixel unchanged).  Returns
  NULL on failure.
*/
static Image *NCCDivideImage(const Image *alpha_image,const Image *beta_image,
  ExceptionInfo *exception)
{
  CacheView
    *alpha_view,
    *beta_view;

  Image
    *divide_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Divide one image into another.
  */
  divide_image=CloneImage(alpha_image,0,0,MagickTrue,exception);
  if (divide_image == (Image *) NULL)
    return(divide_image);
  status=MagickTrue;
  alpha_view=AcquireAuthenticCacheView(divide_image,exception);
  beta_view=AcquireVirtualCacheView(beta_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(beta_image,divide_image,divide_image->rows,1)
#endif
  for (y=0; y < (ssize_t) divide_image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(beta_view,0,y,beta_image->columns,1,
      exception);
    q=GetCacheViewAuthenticPixels(alpha_view,0,y,divide_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) divide_image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(divide_image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(divide_image,i);
        PixelTrait traits = GetPixelChannelTraits(divide_image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /*
          PerceptibleReciprocal() protects against overflow when the scaled
          divisor is vanishingly small.
        */
        if (fabs(p[i]) >= MagickEpsilon)
          q[i]*=PerceptibleReciprocal(QuantumScale*p[i]);
      }
      p+=GetPixelChannels(beta_image);
      q+=GetPixelChannels(divide_image);
    }
    if (SyncCacheViewAuthenticPixels(alpha_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  beta_view=DestroyCacheView(beta_view);
  alpha_view=DestroyCacheView(alpha_view);
  if (status == MagickFalse)
    divide_image=DestroyImage(divide_image);
  return(divide_image);
}
/*
  NCCMaximaImage() scans the image for the pixel whose mean over its
  updatable channels is largest, returning that mean in *maxima and its
  location in *offset.
*/
static MagickBooleanType NCCMaximaImage(const Image *image,double *maxima,
  RectangleInfo *offset,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Identify the maxima value in the image and its location.
  */
  status=MagickTrue;
  *maxima=0.0;
  offset->x=0;
  offset->y=0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        sum = 0.0;

      ssize_t
        channels = 0,
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        sum+=p[i];
        channels++;
      }
      if ((channels != 0) && ((sum/channels) > *maxima))
        {
          *maxima=sum/channels;
          offset->x=x;
          offset->y=y;
        }
      p+=GetPixelChannels(image);
    }
    /*
      Fixed: this loop only reads pixels through a virtual cache view, so
      the former SyncCacheViewAuthenticPixels() call here was inappropriate
      (nothing was modified) and could spuriously fail the scan.
    */
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
  NCCMultiplyImage() scales every updatable channel of the image in place by
  the given factor; when channel_statistics is non-NULL each channel is also
  scaled by its (quantum-scaled) standard deviation.
*/
static MagickBooleanType NCCMultiplyImage(Image *image,const double factor,
  const ChannelStatistics *channel_statistics,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Multiply each pixel by a factor.
  */
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if (channel_statistics != (const ChannelStatistics *) NULL)
          q[i]*=QuantumScale*channel_statistics[channel].standard_deviation;
        q[i]*=factor;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
  NCCSquareImage() returns a clone of the image with every updatable channel
  squared (scaled by QuantumScale so the result stays in quantum range).
  Returns NULL on failure.
*/
static Image *NCCSquareImage(const Image *image,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  Image
    *square_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Square each pixel in the image.
  */
  square_image=CloneImage(image,0,0,MagickTrue,exception);
  if (square_image == (Image *) NULL)
    return(square_image);
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(square_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(square_image,square_image,square_image->rows,1)
#endif
  for (y=0; y < (ssize_t) square_image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,square_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) square_image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(square_image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(square_image,i);
        PixelTrait traits = GetPixelChannelTraits(square_image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /* q[i]^2/QuantumRange, computed as q[i]*(QuantumScale*q[i]) */
        q[i]*=QuantumScale*q[i];
      }
      q+=GetPixelChannels(square_image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    square_image=DestroyImage(square_image);
  return(square_image);
}
/*
  NCCSubtractImageMean() returns beta_image padded out to alpha_image's
  geometry, with each updatable channel's mean (from channel_statistics)
  subtracted; padding pixels outside beta's geometry are set to zero.
  Returns NULL on failure.
*/
static Image *NCCSubtractImageMean(const Image *alpha_image,
  const Image *beta_image,const ChannelStatistics *channel_statistics,
  ExceptionInfo *exception)
{
  CacheView
    *beta_view,
    *image_view;

  Image
    *gamma_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Subtract the image mean and pad.
  */
  gamma_image=CloneImage(beta_image,alpha_image->columns,alpha_image->rows,
    MagickTrue,exception);
  if (gamma_image == (Image *) NULL)
    return(gamma_image);
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(gamma_image,exception);
  beta_view=AcquireVirtualCacheView(beta_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(beta_image,gamma_image,gamma_image->rows,1)
#endif
  for (y=0; y < (ssize_t) gamma_image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(beta_view,0,y,beta_image->columns,1,
      exception);
    q=GetCacheViewAuthenticPixels(image_view,0,y,gamma_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) gamma_image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(gamma_image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(gamma_image,i);
        PixelTrait traits = GetPixelChannelTraits(gamma_image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /*
          Pixels beyond beta's geometry are zero padding; the rest are
          mean-centered.
        */
        if ((x >= (ssize_t) beta_image->columns) ||
            (y >= (ssize_t) beta_image->rows))
          q[i]=(Quantum) 0;
        else
          q[i]=p[i]-channel_statistics[channel].mean;
      }
      p+=GetPixelChannels(beta_image);
      q+=GetPixelChannels(gamma_image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  beta_view=DestroyCacheView(beta_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    gamma_image=DestroyImage(gamma_image);
  return(gamma_image);
}
/*
  NCCUnityImage() returns an image of alpha_image's geometry whose updatable
  channels are QuantumRange inside beta_image's geometry and zero outside it
  (a padded unity mask).  Returns NULL on failure.
*/
static Image *NCCUnityImage(const Image *alpha_image,const Image *beta_image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  Image
    *unity_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Create a padded unity image.
  */
  unity_image=CloneImage(alpha_image,alpha_image->columns,alpha_image->rows,
    MagickTrue,exception);
  if (unity_image == (Image *) NULL)
    return(unity_image);
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(unity_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(unity_image,unity_image,unity_image->rows,1)
#endif
  for (y=0; y < (ssize_t) unity_image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,unity_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) unity_image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(unity_image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(unity_image,i);
        PixelTrait traits = GetPixelChannelTraits(unity_image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=QuantumRange;
        /* Zero padding outside beta's geometry. */
        if ((x >= (ssize_t) beta_image->columns) ||
            (y >= (ssize_t) beta_image->rows))
          q[i]=(Quantum) 0;
      }
      q+=GetPixelChannels(unity_image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    unity_image=DestroyImage(unity_image);
  return(unity_image);
}
/*
  NCCVarianceImage() returns a clone of alpha_image in which each updatable
  channel holds a scaled square-root of the absolute difference between the
  alpha and beta channel values.  Returns NULL on failure.
*/
static Image *NCCVarianceImage(Image *alpha_image,const Image *beta_image,
  ExceptionInfo *exception)
{
  CacheView
    *beta_view,
    *image_view;

  Image
    *variance_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Compute the variance of the two images.
  */
  variance_image=CloneImage(alpha_image,0,0,MagickTrue,exception);
  if (variance_image == (Image *) NULL)
    return(variance_image);
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(variance_image,exception);
  beta_view=AcquireVirtualCacheView(beta_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(beta_image,variance_image,variance_image->rows,1)
#endif
  for (y=0; y < (ssize_t) variance_image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(beta_view,0,y,beta_image->columns,1,
      exception);
    q=GetCacheViewAuthenticPixels(image_view,0,y,variance_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) variance_image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(variance_image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(variance_image,i);
        PixelTrait traits = GetPixelChannelTraits(variance_image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /*
          NOTE(review): ClampToQuantum() is applied BEFORE the division by
          sqrt(QuantumRange), so clamping happens on the unscaled value —
          confirm this ordering is intended rather than clamping the final
          quotient.
        */
        q[i]=ClampToQuantum((QuantumRange*sqrt(fabs((double) QuantumScale*
          (q[i]-p[i])))))/sqrt((double) QuantumRange);
      }
      p+=GetPixelChannels(beta_image);
      q+=GetPixelChannels(variance_image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  beta_view=DestroyCacheView(beta_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    variance_image=DestroyImage(variance_image);
  return(variance_image);
}
static Image *NCCSimilarityImage(const Image *image,const Image *reference,
  const MetricType metric,const double similarity_threshold,
  RectangleInfo *offset,double *similarity_metric,ExceptionInfo *exception)
{
#define DestroySimilarityResources() \
{ \
  if (channel_statistics != (ChannelStatistics *) NULL) \
    channel_statistics=(ChannelStatistics *) \
      RelinquishMagickMemory(channel_statistics); \
  if (beta_image != (Image *) NULL) \
    beta_image=DestroyImage(beta_image); \
  if (gamma_image != (Image *) NULL) \
    gamma_image=DestroyImage(gamma_image); \
  if (ncc_image != (Image *) NULL) \
    ncc_image=DestroyImage(ncc_image); \
  if (normalize_image != (Image *) NULL) \
    normalize_image=DestroyImage(normalize_image); \
  if (square_image != (Image *) NULL) \
    square_image=DestroyImage(square_image); \
  if (unity_image != (Image *) NULL) \
    unity_image=DestroyImage(unity_image); \
}
#define ThrowSimilarityException() \
{ \
  DestroySimilarityResources() \
  return((Image *) NULL); \
}

  ChannelStatistics
    *channel_statistics = (ChannelStatistics *) NULL;

  double
    maxima = 0.0;

  Image
    *beta_image = (Image *) NULL,
    *correlation_image = (Image *) NULL,
    *gamma_image = (Image *) NULL,
    *ncc_image = (Image *) NULL,
    *normalize_image = (Image *) NULL,
    *square_image = (Image *) NULL,
    *unity_image = (Image *) NULL;

  MagickBooleanType
    status;

  RectangleInfo
    geometry;

  /*
    Accelerated correlation-based image similarity using FFT local
    statistics.  Contributed by Fred Weinhaus.

    Fixes over the original: a stray line-continuation backslash after the
    first DestroyImage(square_image) (leftover from the macros above) is
    removed, and the result of the second NCCSquareImage() call is checked
    for NULL before use.
  */
  square_image=NCCSquareImage(image,exception);
  if (square_image == (Image *) NULL)
    ThrowSimilarityException();
  unity_image=NCCUnityImage(image,reference,exception);
  if (unity_image == (Image *) NULL)
    ThrowSimilarityException();
  /*
    Compute the cross correlation of the square and unity images.
  */
  ncc_image=CrossCorrelationImage(square_image,unity_image,exception);
  square_image=DestroyImage(square_image);
  if (ncc_image == (Image *) NULL)
    ThrowSimilarityException();
  status=NCCMultiplyImage(ncc_image,(double) QuantumRange*reference->columns*
    reference->rows,(const ChannelStatistics *) NULL,exception);
  if (status == MagickFalse)
    ThrowSimilarityException();
  /*
    Compute the cross correlation of the source and unity images.
  */
  gamma_image=CrossCorrelationImage(image,unity_image,exception);
  unity_image=DestroyImage(unity_image);
  if (gamma_image == (Image *) NULL)
    ThrowSimilarityException();
  square_image=NCCSquareImage(gamma_image,exception);
  gamma_image=DestroyImage(gamma_image);
  if (square_image == (Image *) NULL)
    ThrowSimilarityException();
  status=NCCMultiplyImage(square_image,(double) QuantumRange,
    (const ChannelStatistics *) NULL,exception);
  if (status == MagickFalse)
    ThrowSimilarityException();
  /*
    Compute the variance of the two images.
  */
  gamma_image=NCCVarianceImage(ncc_image,square_image,exception);
  square_image=DestroyImage(square_image);
  ncc_image=DestroyImage(ncc_image);
  if (gamma_image == (Image *) NULL)
    ThrowSimilarityException();
  channel_statistics=GetImageStatistics(reference,exception);
  if (channel_statistics == (ChannelStatistics *) NULL)
    ThrowSimilarityException();
  /*
    Subtract the image mean.
  */
  status=NCCMultiplyImage(gamma_image,1.0,channel_statistics,exception);
  if (status == MagickFalse)
    ThrowSimilarityException();
  normalize_image=NCCSubtractImageMean(image,reference,channel_statistics,
    exception);
  if (normalize_image == (Image *) NULL)
    ThrowSimilarityException();
  ncc_image=CrossCorrelationImage(image,normalize_image,exception);
  normalize_image=DestroyImage(normalize_image);
  if (ncc_image == (Image *) NULL)
    ThrowSimilarityException();
  /*
    Divide the two images.
  */
  beta_image=NCCDivideImage(ncc_image,gamma_image,exception);
  ncc_image=DestroyImage(ncc_image);
  gamma_image=DestroyImage(gamma_image);
  if (beta_image == (Image *) NULL)
    ThrowSimilarityException();
  (void) ResetImagePage(beta_image,"0x0+0+0");
  SetGeometry(image,&geometry);
  geometry.width=image->columns-reference->columns;
  geometry.height=image->rows-reference->rows;
  /*
    Crop padding.
  */
  correlation_image=CropImage(beta_image,&geometry,exception);
  beta_image=DestroyImage(beta_image);
  if (correlation_image == (Image *) NULL)
    ThrowSimilarityException();
  (void) ResetImagePage(correlation_image,"0x0+0+0");
  /*
    Identify the maxima value in the image and its location.
  */
  status=GrayscaleImage(correlation_image,AveragePixelIntensityMethod,
    exception);
  if (status == MagickFalse)
    ThrowSimilarityException();
  status=NCCMaximaImage(correlation_image,&maxima,offset,exception);
  if (status == MagickFalse)
    {
      correlation_image=DestroyImage(correlation_image);
      ThrowSimilarityException();
    }
  *similarity_metric=1.0-QuantumScale*maxima;
  DestroySimilarityResources();
  return(correlation_image);
}
#endif
static double GetSimilarityMetric(const Image *image,const Image *reference,
  const MetricType metric,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
  double
    distortion = 0.0;

  Image
    *crop_image;

  MagickBooleanType
    status;

  RectangleInfo
    crop_geometry;

  /*
    Distortion between `reference` and the same-size window of `image`
    anchored at (x_offset,y_offset); returns 0.0 when the crop or the
    distortion computation fails.
  */
  SetGeometry(reference,&crop_geometry);
  crop_geometry.x=x_offset;
  crop_geometry.y=y_offset;
  crop_image=CropImage(image,&crop_geometry,exception);
  if (crop_image == (Image *) NULL)
    return(0.0);
  status=GetImageDistortion(crop_image,reference,metric,&distortion,
    exception);
  crop_image=DestroyImage(crop_image);
  return(status == MagickFalse ? 0.0 : distortion);
}
/*
  SimilarityImage() slides `reference` across every candidate offset of
  `image` and returns a map whose pixel at (x,y) encodes how similar the
  window at that offset is (brighter = more similar).  The best offset is
  written to *offset and its score to *similarity_metric.  The scan stops
  early once a match at or below similarity_threshold is found.
*/
MagickExport Image *SimilarityImage(const Image *image,const Image *reference,
const MetricType metric,const double similarity_threshold,
RectangleInfo *offset,double *similarity_metric,ExceptionInfo *exception)
{
#define SimilarityImageTag "Similarity/Image"
CacheView
*similarity_view;
Image
*similarity_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
assert(offset != (RectangleInfo *) NULL);
SetGeometry(reference,offset);
/* start from the worst possible score; the scan below only improves it */
*similarity_metric=MagickMaximumValue;
/* Fast path: FFT-accelerated NCC (HDRI + FFTW builds only), unless the
   "compare:accelerate-ncc" artifact explicitly disables it. */
#if defined(MAGICKCORE_HDRI_SUPPORT) && defined(MAGICKCORE_FFTW_DELEGATE)
{
const char *artifact = GetImageArtifact(image,"compare:accelerate-ncc");
MagickBooleanType accelerate = (artifact != (const char *) NULL) &&
(IsStringTrue(artifact) == MagickFalse) ? MagickFalse : MagickTrue;
if ((accelerate != MagickFalse) &&
(metric == NormalizedCrossCorrelationErrorMetric))
{
similarity_image=NCCSimilarityImage(image,reference,metric,
similarity_threshold,offset,similarity_metric,exception);
return(similarity_image);
}
}
#endif
/* Exhaustive scan: the result has one pixel per candidate offset, hence
   the (columns-ref.columns+1) x (rows-ref.rows+1) size. */
similarity_image=CloneImage(image,image->columns-reference->columns+1,
image->rows-reference->rows+1,MagickTrue,exception);
if (similarity_image == (Image *) NULL)
return((Image *) NULL);
status=SetImageStorageClass(similarity_image,DirectClass,exception);
if (status == MagickFalse)
{
similarity_image=DestroyImage(similarity_image);
return((Image *) NULL);
}
(void) SetImageAlphaChannel(similarity_image,DeactivateAlphaChannel,
exception);
/*
Measure similarity of reference image against image.
*/
status=MagickTrue;
progress=0;
similarity_view=AcquireAuthenticCacheView(similarity_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) \
shared(progress,status,similarity_metric) \
magick_number_threads(image,image,image->rows-reference->rows+1,1)
#endif
for (y=0; y < (ssize_t) (image->rows-reference->rows+1); y++)
{
double
similarity;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
/* flush so this thread sees other threads' *similarity_metric updates,
   allowing early exit once a good-enough match exists anywhere */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp flush(similarity_metric)
#endif
if (*similarity_metric <= similarity_threshold)
continue;
q=GetCacheViewAuthenticPixels(similarity_view,0,y,similarity_image->columns,
1,exception)
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) (image->columns-reference->columns+1); x++)
{
ssize_t
i;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp flush(similarity_metric)
#endif
if (*similarity_metric <= similarity_threshold)
break;
similarity=GetSimilarityMetric(image,reference,metric,x,y,exception);
/* NCC (and the undefined default) report a similarity, not a distance;
   invert so smaller always means better below */
if ((metric == NormalizedCrossCorrelationErrorMetric) ||
(metric == UndefinedErrorMetric))
similarity=1.0-similarity;
/* best-so-far update must be atomic across threads */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_SimilarityImage)
#endif
if (similarity < *similarity_metric)
{
offset->x=x;
offset->y=y;
*similarity_metric=similarity;
}
/* PHASH scores are on a much larger scale; compress into [0,1] */
if (metric == PerceptualHashErrorMetric)
similarity=MagickMin(0.01*similarity,1.0);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait similarity_traits=GetPixelChannelTraits(similarity_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(similarity_traits == UndefinedPixelTrait) ||
((similarity_traits & UpdatePixelTrait) == 0))
continue;
SetPixelChannel(similarity_image,channel,ClampToQuantum(QuantumRange-
QuantumRange*similarity),q);
}
q+=GetPixelChannels(similarity_image);
}
if (SyncCacheViewAuthenticPixels(similarity_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,SimilarityImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
similarity_view=DestroyCacheView(similarity_view);
if (status == MagickFalse)
similarity_image=DestroyImage(similarity_image);
return(similarity_image);
}
|
singlenode_sparsify.h | /******************************************************************************
* ** Copyright (c) 2016, Intel Corporation **
* ** All rights reserved. **
* ** **
* ** Redistribution and use in source and binary forms, with or without **
* ** modification, are permitted provided that the following conditions **
* ** are met: **
* ** 1. Redistributions of source code must retain the above copyright **
* ** notice, this list of conditions and the following disclaimer. **
* ** 2. Redistributions in binary form must reproduce the above copyright **
* ** notice, this list of conditions and the following disclaimer in the **
* ** documentation and/or other materials provided with the distribution. **
* ** 3. Neither the name of the copyright holder nor the names of its **
* ** contributors may be used to endorse or promote products derived **
* ** from this software without specific prior written permission. **
* ** **
* ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS **
* ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT **
* ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR **
* ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT **
* ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, **
* ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED **
* ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR **
* ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF **
* ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING **
* ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS **
* ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* * ******************************************************************************/
/* Michael Anderson (Intel Corp.)
* * ******************************************************************************/
#ifndef SRC_SINGLENODE_SPARSIFY_H_
#define SRC_SINGLENODE_SPARSIFY_H_
/*
 * Sparsify a dense segment guided by a validity bitvector.
 *
 * For each set bit i in `bitvector`, add_fp() combines v1[i] into v2[i]
 * and may clear `keep` to drop the element; survivors are recorded in
 * `bitvector2` and their count in *nnz_out.
 *
 * Fix over the original: the word-skip test was written
 * `_popcnt32(bitvector[ii] == 0)` — a popcount of a boolean.  It happened
 * to behave like `bitvector[ii] == 0`, but the intended (and clearer)
 * form popcounts the word itself.
 *
 * `nnz` is unused; it is retained for interface compatibility.
 */
template <typename Ta, typename Tb>
void sparsify_dense_segment(Ta* v1, int * bitvector, int nnz, int num_ints,
                            Tb* v2, int * bitvector2, int * nnz_out,
                            void (*add_fp)(Ta, bool*, Tb*, void*), void* vsp) {
  (void)nnz;  // unused, kept for API compatibility
  // Clear the output bitvector.
  #pragma omp parallel for
  for(int i = 0 ; i < num_ints ; i++)
  {
    bitvector2[i] = 0;
  }
  int sparse_nnz = 0;
  #pragma omp parallel for reduction(+:sparse_nnz)
  for(int ii = 0 ; ii < num_ints ; ii++)
  {
    int local_nnz = 0;
    if(_popcnt32(bitvector[ii]) == 0) continue;  // whole word empty: skip
    for(int i = ii*32 ; i < (ii+1)*32 ; i++)
    {
      if(get_bitvector(i, bitvector))
      {
        bool keep = true;
        add_fp(v1[i], &keep, &(v2[i]), vsp);
        if(keep)
        {
          set_bitvector(i, bitvector2);
          local_nnz++;
        }
      }
    }
    sparse_nnz += local_nnz;
  }
  *nnz_out = sparse_nnz;
}
/*
 * Sparsify a dense m-by-n tile, processing elements in 256-wide chunks.
 *
 * Fix over the original: the tail chunk was clamped with `lim = m` instead
 * of `lim = nnz` (= m*n), so whenever m*n was not a multiple of 256 the
 * final partial chunk was truncated or skipped entirely.
 *
 * NOTE(review): unlike sparsify_dense_segment(), `bitvector2` is not
 * cleared here — presumably the caller zeroes it first; confirm.
 */
template <typename Ta, typename Tb>
void sparsify_dense_tile(Ta* v1, int * bitvector, int m, int n,
                         Tb* v2, int * bitvector2, int * nnz_out,
                         void (*add_fp)(Ta, bool*, Tb*, void*), void* vsp) {
  int nnz = m*n;
  int sparse_nnz = 0;
  #pragma omp parallel for reduction(+:sparse_nnz)
  for(int i = 0 ; i < nnz ; i+=256)
  {
    int local_nnz = 0;
    int lim = i + 256;
    if(lim > nnz) lim = nnz;  // clamp the tail chunk to the element count
    for(int ii = i ; ii < lim ; ii++)
    {
      if(get_bitvector(ii, bitvector))
      {
        bool keep = true;
        add_fp(v1[ii], &keep, &(v2[ii]), vsp);
        if(keep)
        {
          set_bitvector(ii, bitvector2);
          local_nnz++;
        }
      }
    }
    sparse_nnz += local_nnz;
  }
  *nnz_out = sparse_nnz;
}
// Convert a dense column-major m-by-n matrix (validity tracked in
// `bitvector`, idx = i + j*m) into CSR arrays: *ac values, *jc column
// indices, *ic row pointers.  Indices are 1-based (jc stores j+1, ic
// offsets are +1), i.e. Fortran/MKL-style CSR.
// Two passes over row partitions: (1) count kept elements per partition,
// (2) prefix-sum the counts and fill the output arrays.  add_fp() can
// veto an element via `keep`.
// NOTE(review): pass 1 discards add_fp's output and pass 2 recomputes it,
// which assumes add_fp is deterministic for a given (value, vsp) — confirm.
template <typename Ta, typename Tb>
void sparsify_dense_to_csr(Ta* v1, int * bitvector, int m, int n,
Tb ** ac, int ** jc, int ** ic,
void (*add_fp)(Ta, bool*, Tb*, void*), void* vsp) {
// Allocate B to be at least as large as A
int nthreads = omp_get_max_threads();
int npartitions = nthreads * 4;
int * nnzp = new int[npartitions];
// Pass 1: count surviving non-zeros in each row partition.
#pragma omp parallel for schedule(dynamic)
for(int p = 0 ; p < npartitions ; p++)
{
int local_nnz = 0;
int rows_per_partition = ((m + npartitions-1) / npartitions);
int start_m = p * rows_per_partition;
int end_m = (p+1) * rows_per_partition;
if(end_m > m) end_m = m;
for(int i = start_m ; i < end_m ; i++)
{
for(int j = 0 ; j < n ; j++)
{
int idx = i + j * m;
if(get_bitvector(idx, bitvector))
{
Tb tmp;
bool keep = true;
add_fp(v1[idx], &keep, &tmp, vsp);
if(keep)
{
local_nnz++;
}
}
}
}
nnzp[p] = local_nnz;
}
// Total nnz, then convert per-partition counts into an inclusive prefix
// sum so nnzp[p-1] is partition p's starting offset.
int nnz = 0;
for(int p = 0 ; p < npartitions ; p++)
{
nnz += nnzp[p];
}
for(int p = 1 ; p < npartitions ; p++)
{
nnzp[p] = nnzp[p-1] + nnzp[p];
}
// Exact-size, 64-byte-aligned output arrays.
*ac = reinterpret_cast<Tb*> (_mm_malloc(nnz * sizeof(Tb), 64));
*jc = reinterpret_cast<int*> (_mm_malloc(nnz* sizeof(int), 64));
*ic = reinterpret_cast<int*>( _mm_malloc((m+1) * sizeof(int), 64));
// Pass 2: each partition writes its rows starting at its prefix offset.
#pragma omp parallel for schedule(dynamic)
for(int p = 0 ; p < npartitions ; p++)
{
int start_nnz = 0;
if(p > 0) start_nnz = nnzp[p-1];
int rows_per_partition = ((m + npartitions-1) / npartitions);
int start_m = p * rows_per_partition;
int end_m = (p+1) * rows_per_partition;
if(end_m > m) end_m = m;
for(int i = start_m ; i < end_m ; i++)
{
(*ic)[i] = start_nnz+1;
for(int j = 0 ; j < n ; j++)
{
int idx = i + j * m;
if(get_bitvector(idx, bitvector))
{
Tb tmp;
bool keep = true;
add_fp(v1[idx], &keep, &tmp, vsp);
if(keep)
{
(*ac)[start_nnz] = tmp;
(*jc)[start_nnz] = j+1;
start_nnz++;
}
}
}
}
}
// Sentinel row pointer (1-based).
(*ic)[m] = nnz+1;
delete [] nnzp;
}
#endif // SRC_SINGLENODE_SPARSIFY_H_
|
PrimPar.c | /*
*
* Proyecto final
*
* Daniel Roa - A01021960
* Christian Dalma - A01423166
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <time.h>
#define DIM 1000
void init(void);
void delete (int);
struct prims
{
int edge[DIM][DIM];
int dim;
int U[DIM];
int total_minDist;
int counts;
};
struct prims prim;
/*
 * Interactive driver: builds a random complete weighted graph of prim.dim
 * stops (weight 0 doubles as "no usable edge") and runs a parallelised
 * Prim-style minimum-edge search, printing the visit order and total cost.
 *
 * Fix over the original: minDist/newElem were compared outside the OpenMP
 * critical section and written inside it, so two threads could both pass
 * the comparison and the larger weight could win (data race).  The
 * comparison is now re-validated under the lock.  Unused locals removed.
 */
int main()
{
    int i, j, k;
    int minDist;
    int newElem = 0; /* last vertex selected for the tree */

    prim.total_minDist = 0;
    prim.counts = 0;

    printf("A continuación, inserte la cantidad de paradas de su ruta:\n");
    scanf("%d", &prim.dim);
    int num = prim.dim;
    int id = omp_get_thread_num(); /* 0 here: outside any parallel region */
    srand(time(NULL));
    printf("Inserta el peso del tráfico: \n");
    printf("Hilo %d\n", id);
    /* Random weight matrix in [0, num). */
    for (i = 0; i < prim.dim; ++i)
    {
        for (j = 0; j < prim.dim; j++)
        {
            prim.edge[i][j] = rand() % num;
            printf("Cost: %d ", prim.edge[i][j]);
            printf("From %d To %d\n", i, j);
        }
    }
    double start = omp_get_wtime();
    init();
    for (k = 0; k < prim.dim - 1; k++)
    {
        minDist = 1000; /* sentinel above any generated weight */
        for (i = 0; i < prim.counts; i++)
        {
#pragma omp parallel for schedule(static)
            for (j = 0; j < prim.dim; j++)
            {
                int w = prim.edge[prim.U[i]][j];
                /* cheap unsynchronised pre-filter; re-checked below */
                if (w == 0 || w > minDist)
                    continue;
#pragma omp critical
                {
                    /* minDist may have shrunk since the pre-filter */
                    if (w != 0 && w <= minDist)
                    {
                        minDist = w;
                        newElem = j;
                        printf("%d --> %d\n", i + 1, j + 1);
                    }
                }
            }
        }
        prim.total_minDist += minDist;
        /* after the loop i == prim.counts, so this appends newElem */
        prim.U[i] = newElem;
        delete (newElem);
        prim.counts++;
    }
    printf("\n");
    for (i = 0; i < prim.dim; i++)
    {
        printf("%d ", prim.U[i] + 1);
        if (i < prim.dim - 1)
            printf("-> ");
    }
    printf("\n\n");
    double finito = omp_get_wtime() - start;
    printf("Le tomó %.5g segundos en averiguar una solución.\n", finito);
    printf("Distancia mínima entre paradas: %d\n\n", prim.total_minDist);
    return 0;
}
/* Reset the Prim state: mark every stop unvisited (-1), then seed the tree
   with stop 0 and zero out its incoming edges via delete(). */
void init(void)
{
    prim.total_minDist = 0;
    prim.counts = 0;
    for (int v = 0; v < prim.dim; v++)
        prim.U[v] = -1;
    prim.U[0] = 0;
    delete (prim.U[0]);
    prim.counts++;
}
/* Remove next_element from future consideration by zeroing the column of
   edges leading into it (weight 0 means "no edge" elsewhere). */
void delete (int next_element)
{
    for (int row = 0; row < prim.dim; row++)
        prim.edge[row][next_element] = 0;
}
morpho_SIMD.c | #include "morpho_SIMD.h"
/*
 * 3x3 binary erosion (logical AND over the 3x3 neighbourhood) of a packed
 * binary image stored as 128-bit vectors, nb_vbits_col vectors per row.
 * Borders (first/last row, first/last vector column, four corners) are
 * handled by replicating the edge vectors.
 *
 * Fixes over the original:
 *  - bottom-row loop loaded c0 from img_bin[height-1-2] (= height-3); the
 *    row above the last is height-2, consistent with the a0/b0 loads.
 *  - the interior omp-for loop wrote the function-scope vbits temporaries
 *    shared between threads (data race); it now uses loop-local temps.
 */
vbits** erosion_3x3_SIMD_naif(vbits** img_bin, int height, int width)
{
    int nb_vbits_col = ceil((float)width/128);
    int nb_unused_col = (128-(width%128))%128;
    vbits** m = (vbits**)vui32matrix(0, height-1, 0, nb_vbits_col-1);
    vbits a0, b0, c0;
    vbits a1, b1, c1;
    vbits a2, b2, c2;
    vbits aa0, cc0;
    vbits aa1, cc1;
    vbits aa2, cc2;
    vbits y;
    // Row 0 (top row replicated upward)
    for(int i = 1; i < nb_vbits_col-1; i++){
        a0 = a1 = vec_load(&img_bin[0][i-1]);
        a2 = vec_load(&img_bin[1][i-1]);
        b0 = b1 = vec_load(&img_bin[0][i-0]);
        b2 = vec_load(&img_bin[1][i-0]);
        c0 = c1 = vec_load(&img_bin[0][i+1]);
        c2 = vec_load(&img_bin[1][i+1]);
        aa0 = vec_left1_bin(a0,b0);
        cc0 = vec_right1_bin(b0,c0);
        aa1 = vec_left1_bin(a1,b1);
        cc1 = vec_right1_bin(b1,c1);
        aa2 = vec_left1_bin(a2,b2);
        cc2 = vec_right1_bin(b2,c2);
        y = vAND_2D_9(aa0, b0, cc0, aa1, b1, cc1, aa2, b2, cc2);
        vec_store(&m[0][i], y);
    }
    // Row height-1 (bottom row replicated downward)
    for(int i = 1; i < nb_vbits_col-1; i++){
        a2 = a1 = vec_load(&img_bin[height-1][i-1]);
        a0 = vec_load(&img_bin[height-2][i-1]);
        b2 = b1 = vec_load(&img_bin[height-1][i-0]);
        b0 = vec_load(&img_bin[height-2][i-0]);
        c2 = c1 = vec_load(&img_bin[height-1][i+1]);
        c0 = vec_load(&img_bin[height-2][i+1]); /* FIX: was height-1-2 */
        aa0 = vec_left1_bin(a0,b0);
        cc0 = vec_right1_bin(b0,c0);
        aa1 = vec_left1_bin(a1,b1);
        cc1 = vec_right1_bin(b1,c1);
        aa2 = vec_left1_bin(a2,b2);
        cc2 = vec_right1_bin(b2,c2);
        y = vAND_2D_9(aa0, b0, cc0, aa1, b1, cc1, aa2, b2, cc2);
        vec_store(&m[height-1][i], y);
    }
    // Column 0 (left neighbour synthesised by shifting)
    for(int i = 1; i < height-1; i++){
        b0 = vec_load(&img_bin[i-1][0]);
        c0 = vec_load(&img_bin[i-1][1]);
        a0 = _mm_bitshift_right(b0, 127);
        b1 = vec_load(&img_bin[i-0][0]);
        c1 = vec_load(&img_bin[i-0][1]);
        a1 = _mm_bitshift_right(b1, 127);
        b2 = vec_load(&img_bin[i+1][0]);
        c2 = vec_load(&img_bin[i+1][1]);
        a2 = _mm_bitshift_right(b2, 127);
        aa0 = vec_left1_bin(a0,b0);
        cc0 = vec_right1_bin(b0,c0);
        aa1 = vec_left1_bin(a1,b1);
        cc1 = vec_right1_bin(b1,c1);
        aa2 = vec_left1_bin(a2,b2);
        cc2 = vec_right1_bin(b2,c2);
        y = vAND_2D_9(aa0, b0, cc0, aa1, b1, cc1, aa2, b2, cc2);
        vec_store(&m[i][0], y);
    }
    // Column nb_vbits_col-1 (padding bits masked via *_unused_col)
    for(int i = 1; i < height-1; i++){
        b0 = vec_load(&img_bin[i-1][nb_vbits_col-1]);
        a0 = vec_load(&img_bin[i-1][nb_vbits_col-2]);
        c0 = _mm_bitshift_left(b0, 127-nb_unused_col);
        b1 = vec_load(&img_bin[i-0][nb_vbits_col-1]);
        a1 = vec_load(&img_bin[i-0][nb_vbits_col-2]);
        c1 = _mm_bitshift_left(b1, 127-nb_unused_col);
        b2 = vec_load(&img_bin[i+1][nb_vbits_col-1]);
        a2 = vec_load(&img_bin[i+1][nb_vbits_col-2]);
        c2 = _mm_bitshift_left(b2, 127-nb_unused_col);
        aa0 = vec_left1_bin(a0,b0);
        cc0 = vec_right1_bin_unused_col(b0,c0,nb_unused_col);
        aa1 = vec_left1_bin(a1,b1);
        cc1 = vec_right1_bin_unused_col(b1,c1,nb_unused_col);
        aa2 = vec_left1_bin(a2,b2);
        cc2 = vec_right1_bin_unused_col(b2,c2,nb_unused_col);
        y = vAND_2D_9(aa0, b0, cc0, aa1, b1, cc1, aa2, b2, cc2);
        vec_store(&m[i][nb_vbits_col-1], y);
    }
    // Top-left corner
    b0 = b1 = vec_load(&img_bin[0][0]);
    c0 = c1 = vec_load(&img_bin[0][1]);
    a0 = a1 = _mm_bitshift_right(b1, 127);
    b2 = vec_load(&img_bin[1][0]);
    a2 = _mm_bitshift_right(b2, 127);
    c2 = vec_load(&img_bin[1][1]);
    aa0 = vec_left1_bin(a0,b0);
    cc0 = vec_right1_bin(b0,c0);
    aa1 = vec_left1_bin(a1,b1);
    cc1 = vec_right1_bin(b1,c1);
    aa2 = vec_left1_bin(a2,b2);
    cc2 = vec_right1_bin(b2,c2);
    y = vAND_2D_9(aa0, b0, cc0, aa1, b1, cc1, aa2, b2, cc2);
    vec_store(&m[0][0], y);
    // Top-right corner
    // NOTE(review): unlike the last-column loop, the corner shifts use 127
    // and vec_right1_bin without the unused-column mask — confirm intended.
    a0 = a1 = vec_load(&img_bin[0][nb_vbits_col-2]);
    b0 = b1 = vec_load(&img_bin[0][nb_vbits_col-1]);
    c0 = c1 = _mm_bitshift_left(b1, 127);
    a2 = vec_load(&img_bin[1][nb_vbits_col-2]);
    b2 = vec_load(&img_bin[1][nb_vbits_col-1]);
    c2 = _mm_bitshift_left(b2, 127);
    aa0 = vec_left1_bin(a0,b0);
    cc0 = vec_right1_bin(b0,c0);
    aa1 = vec_left1_bin(a1,b1);
    cc1 = vec_right1_bin(b1,c1);
    aa2 = vec_left1_bin(a2,b2);
    cc2 = vec_right1_bin(b2,c2);
    y = vAND_2D_9(aa0, b0, cc0, aa1, b1, cc1, aa2, b2, cc2);
    vec_store(&m[0][nb_vbits_col-1], y);
    // Bottom-left corner
    c2 = c1 = vec_load(&img_bin[height-1][1]);
    b1 = b2 = vec_load(&img_bin[height-1][0]);
    b0 = vec_load(&img_bin[height-2][0]);
    c0 = vec_load(&img_bin[height-2][1]);
    a2 = a1 = _mm_bitshift_right(b1, 127);
    a0 = _mm_bitshift_right(b0, 127);
    aa0 = vec_left1_bin(a0,b0);
    cc0 = vec_right1_bin(b0,c0);
    aa1 = vec_left1_bin(a1,b1);
    cc1 = vec_right1_bin(b1,c1);
    aa2 = vec_left1_bin(a2,b2);
    cc2 = vec_right1_bin(b2,c2);
    y = vAND_2D_9(aa0, b0, cc0, aa1, b1, cc1, aa2, b2, cc2);
    vec_store(&m[height-1][0], y);
    // Bottom-right corner
    a2 = a1 = vec_load(&img_bin[height-1][nb_vbits_col-2]);
    b2 = b1 = vec_load(&img_bin[height-1][nb_vbits_col-1]);
    c2 = c1 = _mm_bitshift_left(b1, 127);
    a0 = vec_load(&img_bin[height-2][nb_vbits_col-2]);
    b0 = vec_load(&img_bin[height-2][nb_vbits_col-1]);
    c0 = _mm_bitshift_left(b0, 127);
    aa0 = vec_left1_bin(a0,b0);
    cc0 = vec_right1_bin(b0,c0);
    aa1 = vec_left1_bin(a1,b1);
    cc1 = vec_right1_bin(b1,c1);
    aa2 = vec_left1_bin(a2,b2);
    cc2 = vec_right1_bin(b2,c2);
    y = vAND_2D_9(aa0, b0, cc0, aa1, b1, cc1, aa2, b2, cc2);
    vec_store(&m[height-1][nb_vbits_col-1], y);
    // Interior: loop-local temporaries so each thread has private state.
    #pragma omp parallel for
    for(int i = 1; i < height-1; i++){
        for(int j = 1; j < nb_vbits_col-1; j++){
            vbits ta0 = vec_load(&img_bin[i-1][j-1]);
            vbits tb0 = vec_load(&img_bin[i-1][j-0]);
            vbits tc0 = vec_load(&img_bin[i-1][j+1]);
            vbits ta1 = vec_load(&img_bin[i-0][j-1]);
            vbits tb1 = vec_load(&img_bin[i-0][j-0]);
            vbits tc1 = vec_load(&img_bin[i-0][j+1]);
            vbits ta2 = vec_load(&img_bin[i+1][j-1]);
            vbits tb2 = vec_load(&img_bin[i+1][j-0]);
            vbits tc2 = vec_load(&img_bin[i+1][j+1]);
            vbits l0 = vec_left1_bin(ta0,tb0);
            vbits r0 = vec_right1_bin(tb0,tc0);
            vbits l1 = vec_left1_bin(ta1,tb1);
            vbits r1 = vec_right1_bin(tb1,tc1);
            vbits l2 = vec_left1_bin(ta2,tb2);
            vbits r2 = vec_right1_bin(tb2,tc2);
            vbits out = vAND_2D_9(l0, tb0, r0, l1, tb1, r1, l2, tb2, r2);
            vec_store(&m[i][j], out);
        }
    }
    return m;
}
/*
 * 3x3 binary dilation (logical OR over the 3x3 neighbourhood) of a packed
 * binary image stored as 128-bit vectors; structure mirrors
 * erosion_3x3_SIMD_naif() with vOR_2D_9 in place of vAND_2D_9.
 *
 * Fixes over the original (same copy-paste defects as the erosion):
 *  - bottom-row loop loaded c0 from img_bin[height-1-2] (= height-3)
 *    instead of height-2;
 *  - the interior omp-for loop raced on shared function-scope vbits
 *    temporaries; it now uses loop-local temps.
 */
vbits** dilatation_3x3_SIMD_naif(vbits** img_bin, int height, int width)
{
    int nb_vbits_col = ceil((float)width/128);
    int nb_unused_col = (128-(width%128))%128;
    vbits** m = (vbits**)vui32matrix(0, height-1, 0, nb_vbits_col-1);
    vbits a0, b0, c0;
    vbits a1, b1, c1;
    vbits a2, b2, c2;
    vbits aa0, cc0;
    vbits aa1, cc1;
    vbits aa2, cc2;
    vbits y;
    // Row 0 (top row replicated upward)
    for(int i = 1; i < nb_vbits_col-1; i++){
        a0 = a1 = vec_load(&img_bin[0][i-1]);
        a2 = vec_load(&img_bin[1][i-1]);
        b0 = b1 = vec_load(&img_bin[0][i-0]);
        b2 = vec_load(&img_bin[1][i-0]);
        c0 = c1 = vec_load(&img_bin[0][i+1]);
        c2 = vec_load(&img_bin[1][i+1]);
        aa0 = vec_left1_bin(a0,b0);
        cc0 = vec_right1_bin(b0,c0);
        aa1 = vec_left1_bin(a1,b1);
        cc1 = vec_right1_bin(b1,c1);
        aa2 = vec_left1_bin(a2,b2);
        cc2 = vec_right1_bin(b2,c2);
        y = vOR_2D_9(aa0, b0, cc0, aa1, b1, cc1, aa2, b2, cc2);
        vec_store(&m[0][i], y);
    }
    // Row height-1 (bottom row replicated downward)
    for(int i = 1; i < nb_vbits_col-1; i++){
        a2 = a1 = vec_load(&img_bin[height-1][i-1]);
        a0 = vec_load(&img_bin[height-2][i-1]);
        b2 = b1 = vec_load(&img_bin[height-1][i-0]);
        b0 = vec_load(&img_bin[height-2][i-0]);
        c2 = c1 = vec_load(&img_bin[height-1][i+1]);
        c0 = vec_load(&img_bin[height-2][i+1]); /* FIX: was height-1-2 */
        aa0 = vec_left1_bin(a0,b0);
        cc0 = vec_right1_bin(b0,c0);
        aa1 = vec_left1_bin(a1,b1);
        cc1 = vec_right1_bin(b1,c1);
        aa2 = vec_left1_bin(a2,b2);
        cc2 = vec_right1_bin(b2,c2);
        y = vOR_2D_9(aa0, b0, cc0, aa1, b1, cc1, aa2, b2, cc2);
        vec_store(&m[height-1][i], y);
    }
    // Column 0 (left neighbour synthesised by shifting)
    for(int i = 1; i < height-1; i++){
        b0 = vec_load(&img_bin[i-1][0]);
        c0 = vec_load(&img_bin[i-1][1]);
        a0 = _mm_bitshift_right(b0, 127);
        b1 = vec_load(&img_bin[i-0][0]);
        c1 = vec_load(&img_bin[i-0][1]);
        a1 = _mm_bitshift_right(b1, 127);
        b2 = vec_load(&img_bin[i+1][0]);
        c2 = vec_load(&img_bin[i+1][1]);
        a2 = _mm_bitshift_right(b2, 127);
        aa0 = vec_left1_bin(a0,b0);
        cc0 = vec_right1_bin(b0,c0);
        aa1 = vec_left1_bin(a1,b1);
        cc1 = vec_right1_bin(b1,c1);
        aa2 = vec_left1_bin(a2,b2);
        cc2 = vec_right1_bin(b2,c2);
        y = vOR_2D_9(aa0, b0, cc0, aa1, b1, cc1, aa2, b2, cc2);
        vec_store(&m[i][0], y);
    }
    // Column nb_vbits_col-1 (padding bits masked via *_unused_col)
    for(int i = 1; i < height-1; i++){
        b0 = vec_load(&img_bin[i-1][nb_vbits_col-1]);
        a0 = vec_load(&img_bin[i-1][nb_vbits_col-2]);
        c0 = _mm_bitshift_left(b0, 127-nb_unused_col);
        b1 = vec_load(&img_bin[i-0][nb_vbits_col-1]);
        a1 = vec_load(&img_bin[i-0][nb_vbits_col-2]);
        c1 = _mm_bitshift_left(b1, 127-nb_unused_col);
        b2 = vec_load(&img_bin[i+1][nb_vbits_col-1]);
        a2 = vec_load(&img_bin[i+1][nb_vbits_col-2]);
        c2 = _mm_bitshift_left(b2, 127-nb_unused_col);
        aa0 = vec_left1_bin(a0,b0);
        cc0 = vec_right1_bin_unused_col(b0,c0,nb_unused_col);
        aa1 = vec_left1_bin(a1,b1);
        cc1 = vec_right1_bin_unused_col(b1,c1,nb_unused_col);
        aa2 = vec_left1_bin(a2,b2);
        cc2 = vec_right1_bin_unused_col(b2,c2,nb_unused_col);
        y = vOR_2D_9(aa0, b0, cc0, aa1, b1, cc1, aa2, b2, cc2);
        vec_store(&m[i][nb_vbits_col-1], y);
    }
    // Top-left corner
    b0 = b1 = vec_load(&img_bin[0][0]);
    c0 = c1 = vec_load(&img_bin[0][1]);
    a0 = a1 = _mm_bitshift_right(b1, 127);
    b2 = vec_load(&img_bin[1][0]);
    a2 = _mm_bitshift_right(b2, 127);
    c2 = vec_load(&img_bin[1][1]);
    aa0 = vec_left1_bin(a0,b0);
    cc0 = vec_right1_bin(b0,c0);
    aa1 = vec_left1_bin(a1,b1);
    cc1 = vec_right1_bin(b1,c1);
    aa2 = vec_left1_bin(a2,b2);
    cc2 = vec_right1_bin(b2,c2);
    y = vOR_2D_9(aa0, b0, cc0, aa1, b1, cc1, aa2, b2, cc2);
    vec_store(&m[0][0], y);
    // Top-right corner
    a0 = a1 = vec_load(&img_bin[0][nb_vbits_col-2]);
    b0 = b1 = vec_load(&img_bin[0][nb_vbits_col-1]);
    c0 = c1 = _mm_bitshift_left(b1, 127);
    a2 = vec_load(&img_bin[1][nb_vbits_col-2]);
    b2 = vec_load(&img_bin[1][nb_vbits_col-1]);
    c2 = _mm_bitshift_left(b2, 127);
    aa0 = vec_left1_bin(a0,b0);
    cc0 = vec_right1_bin(b0,c0);
    aa1 = vec_left1_bin(a1,b1);
    cc1 = vec_right1_bin(b1,c1);
    aa2 = vec_left1_bin(a2,b2);
    cc2 = vec_right1_bin(b2,c2);
    y = vOR_2D_9(aa0, b0, cc0, aa1, b1, cc1, aa2, b2, cc2);
    vec_store(&m[0][nb_vbits_col-1], y);
    // Bottom-left corner
    c2 = c1 = vec_load(&img_bin[height-1][1]);
    b1 = b2 = vec_load(&img_bin[height-1][0]);
    b0 = vec_load(&img_bin[height-2][0]);
    c0 = vec_load(&img_bin[height-2][1]);
    a2 = a1 = _mm_bitshift_right(b1, 127);
    a0 = _mm_bitshift_right(b0, 127);
    aa0 = vec_left1_bin(a0,b0);
    cc0 = vec_right1_bin(b0,c0);
    aa1 = vec_left1_bin(a1,b1);
    cc1 = vec_right1_bin(b1,c1);
    aa2 = vec_left1_bin(a2,b2);
    cc2 = vec_right1_bin(b2,c2);
    y = vOR_2D_9(aa0, b0, cc0, aa1, b1, cc1, aa2, b2, cc2);
    vec_store(&m[height-1][0], y);
    // Bottom-right corner
    a2 = a1 = vec_load(&img_bin[height-1][nb_vbits_col-2]);
    b2 = b1 = vec_load(&img_bin[height-1][nb_vbits_col-1]);
    c2 = c1 = _mm_bitshift_left(b1, 127);
    a0 = vec_load(&img_bin[height-2][nb_vbits_col-2]);
    b0 = vec_load(&img_bin[height-2][nb_vbits_col-1]);
    c0 = _mm_bitshift_left(b0, 127);
    aa0 = vec_left1_bin(a0,b0);
    cc0 = vec_right1_bin(b0,c0);
    aa1 = vec_left1_bin(a1,b1);
    cc1 = vec_right1_bin(b1,c1);
    aa2 = vec_left1_bin(a2,b2);
    cc2 = vec_right1_bin(b2,c2);
    y = vOR_2D_9(aa0, b0, cc0, aa1, b1, cc1, aa2, b2, cc2);
    vec_store(&m[height-1][nb_vbits_col-1], y);
    // Interior: loop-local temporaries so each thread has private state.
    #pragma omp parallel for
    for(int i = 1; i < height-1; i++){
        for(int j = 1; j < nb_vbits_col-1; j++){
            vbits ta0 = vec_load(&img_bin[i-1][j-1]);
            vbits tb0 = vec_load(&img_bin[i-1][j-0]);
            vbits tc0 = vec_load(&img_bin[i-1][j+1]);
            vbits ta1 = vec_load(&img_bin[i-0][j-1]);
            vbits tb1 = vec_load(&img_bin[i-0][j-0]);
            vbits tc1 = vec_load(&img_bin[i-0][j+1]);
            vbits ta2 = vec_load(&img_bin[i+1][j-1]);
            vbits tb2 = vec_load(&img_bin[i+1][j-0]);
            vbits tc2 = vec_load(&img_bin[i+1][j+1]);
            vbits l0 = vec_left1_bin(ta0,tb0);
            vbits r0 = vec_right1_bin(tb0,tc0);
            vbits l1 = vec_left1_bin(ta1,tb1);
            vbits r1 = vec_right1_bin(tb1,tc1);
            vbits l2 = vec_left1_bin(ta2,tb2);
            vbits r2 = vec_right1_bin(tb2,tc2);
            vbits out = vOR_2D_9(l0, tb0, r0, l1, tb1, r1, l2, tb2, r2);
            vec_store(&m[i][j], out);
        }
    }
    return m;
}
/*
 * Runs the optimised 3x3 SIMD erosion over frames 001..199 of a PGM
 * sequence rooted at basePath, optionally saving results under
 * output/erosion_3x3_SIMD_opti/.
 *
 * Fix over the original: sprintf() into the fixed 60-byte buffer could
 * overflow for long basePath values; snprintf() now bounds both writes.
 */
void compute_erosion_3x3_SIMD_opti(char* basePath, int save)
{
    CHECK_ERROR(system("mkdir -p output/erosion_3x3_SIMD_opti"));
    char buff[60];
    int nrl, nrh, ncl, nch;
    size_t height, width;
    uint8 **img, **output;
    vbits **img_bin, **output_bin;
    for(int i = 1; i < 200; i++){
        snprintf(buff, sizeof(buff), "%s%.3d.pgm", basePath, i);
        img = LoadPGM_ui8matrix(buff, &nrl, &nrh, &ncl, &nch);
        height = nrh-nrl+1;
        width = nch-ncl+1;
        img_bin = convert_to_binary(img, height, width);
        output_bin = erosion_3x3_SIMD_opti(img_bin, height, width);
        output = convert_from_binary(output_bin, height, width);
        if(save){
            snprintf(buff, sizeof(buff),
                     "output/erosion_3x3_SIMD_opti/ero3%.3d.pgm", i);
            SavePGM_ui8matrix(output, nrl, nrh, ncl, nch, buff);
        }
        /* release per-frame buffers before the next iteration */
        free_ui8matrix(img, nrl, nrh, ncl, nch);
        free_ui8matrix(output, nrl, nrh, ncl, nch);
        free_vbitsmatrix(img_bin, height, width);
        free_vbitsmatrix(output_bin, height, width);
    }
}
// AND logique sur un voisinage de taille 3x3
vbits ** erosion_3x3_SIMD_opti(vbits** img_bin, int height, int width)
{
int nb_vbits_col = ceil((float)width/128);
int nb_unused_col = (128-(width%128))%128;
vbits **m = (vbits**)vui32matrix(0, height-1, 0, nb_vbits_col-1);
// Ajout d'un pointeur vers les lignes 0 et height-1 de l'image pour gérer plus facilement les bords
vbits **img_bin_extra_lines=(vbits **) _mm_malloc ((size_t)((height+2)*sizeof(vbits*)), 16);
if (!img_bin_extra_lines) vnrerror("allocation failure in erosion_3x3_SIMD_opti()");
img_bin_extra_lines++;
for(int i = 0; i < height; i++)
img_bin_extra_lines[i] = img_bin[i];
img_bin_extra_lines[-1] = img_bin_extra_lines[0];
img_bin_extra_lines[height] = img_bin_extra_lines[height-1];
vbits a0, b0, c0, a0_last, b0_last, c0_last;
vbits a1, b1, c1, a1_last, b1_last, c1_last;;
vbits a2, b2, c2, a2_last, b2_last, c2_last;;
vbits aa0, cc0, aa0_last, cc0_last;
vbits aa1, cc1, aa1_last, cc1_last;
vbits aa2, cc2, aa2_last, cc2_last;
vbits and0, and1, and2, and0_last, and1_last, and2_last;
vbits y, y_last;
int n = height%3;
// Cas particulier des images à moins de 128 colonnes
if(nb_vbits_col == 1){
// PROLOGUE
b0 = vec_load(&img_bin_extra_lines[-1][0]);
b1 = vec_load(&img_bin_extra_lines[0][0]);
a0 = _mm_bitshift_right(b0, 127);
a1 = _mm_bitshift_right(b1, 127);
c0 = _mm_bitshift_left(b0, 127-nb_unused_col);
c1 = _mm_bitshift_left(b1, 127-nb_unused_col);
aa0 = vec_left1_bin(a0, b0);
cc0 = vec_right1_bin_unused_col(b0, c0, nb_unused_col);
aa1 = vec_left1_bin(a1, b1);
cc1 = vec_right1_bin_unused_col(b1, c1, nb_unused_col);
and0 = vAND3(aa0, b0, cc0);
and1 = vAND3(aa1, b1, cc1);
for(int i=0; i < height-n; i+=3){
b2 = vec_load(&img_bin_extra_lines[i+1][0]);
a2 = _mm_bitshift_right(b2, 127);
c2 = _mm_bitshift_left(b2, 127-nb_unused_col);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin_unused_col(b2,c2,nb_unused_col);
and2 = vAND3(aa2, b2, cc2);
y = vAND3(and0, and1, and2);
vec_store(&m[i][0], y);
b2 = vec_load(&img_bin_extra_lines[i+2][0]);
a2 = _mm_bitshift_right(b2, 127);
c2 = _mm_bitshift_left(b2, 127-nb_unused_col);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin_unused_col(b2,c2,nb_unused_col);
and0 = vAND3(aa2, b2, cc2);
y = vAND3(and0, and1, and2);
vec_store(&m[i+1][0], y);
b2 = vec_load(&img_bin_extra_lines[i+3][0]);
a2 = _mm_bitshift_right(b2, 127);
c2 = _mm_bitshift_left(b2, 127-nb_unused_col);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin_unused_col(b2,c2,nb_unused_col);
and1 = vAND3(aa2, b2, cc2);
y = vAND3(and0, and1, and2);
vec_store(&m[i+2][0], y);
}
// EPILOGUE
switch (n) {
case 2:
b2 = vec_load(&img_bin_extra_lines[height-1][0]);
a2 = _mm_bitshift_right(b2, 127);
c2 = _mm_bitshift_left(b2, 127-nb_unused_col);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin_unused_col(b2,c2,nb_unused_col);
and2 = vAND3(aa2, b2, cc2);
y = vAND3(and0, and1, and2);
vec_store(&m[height-2][0], y);
b2 = vec_load(&img_bin_extra_lines[height][0]);
a2 = _mm_bitshift_right(b2, 127);
c2 = _mm_bitshift_left(b2, 127-nb_unused_col);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin_unused_col(b2,c2,nb_unused_col);
and0 = vAND3(aa2, b2, cc2);
y = vAND3(and0, and1, and2);
vec_store(&m[height-1][0], y);
break;
case 1:
b2 = vec_load(&img_bin_extra_lines[height][0]);
a2 = _mm_bitshift_right(b2, 127);
c2 = _mm_bitshift_left(b2, 127-nb_unused_col);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin_unused_col(b2,c2,nb_unused_col);
and2 = vAND3(aa2, b2, cc2);
y = vAND3(and0, and1, and2);
vec_store(&m[height-1][0], y);
break;
default:
break;
}
_mm_free(img_bin_extra_lines-1);
return m;
}
// Gestion des cas 'standards'
a0 = vec_load(&img_bin_extra_lines[-1][0]);
b0 = vec_load(&img_bin_extra_lines[-1][1]);
a1 = vec_load(&img_bin_extra_lines[0][0]);
b1 = vec_load(&img_bin_extra_lines[0][1]);
for(int j = 1; j < nb_vbits_col-1; j++){
// PROLOGUE
c0 = vec_load(&img_bin_extra_lines[-1][j+1]);
c1 = vec_load(&img_bin_extra_lines[0][j+1]);
aa0 = vec_left1_bin(a0, b0);
cc0 = vec_right1_bin(b0, c0);
aa1 = vec_left1_bin(a1, b1);
cc1 = vec_right1_bin(b1, c1);
and0 = vAND3(aa0, b0, cc0);
and1 = vAND3(aa1, b1, cc1);
for(int i = 0; i < height-n; i+=3){
a2 = vec_load(&img_bin_extra_lines[i+1][j-1]);
b2 = vec_load(&img_bin_extra_lines[i+1][j+0]);
c2 = vec_load(&img_bin_extra_lines[i+1][j+1]);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
and2 = vAND3(aa2, b2, cc2);
y = vAND3(and0, and1, and2);
vec_store(&m[i][j], y);
a2 = vec_load(&img_bin_extra_lines[i+2][j-1]);
b2 = vec_load(&img_bin_extra_lines[i+2][j+0]);
c2 = vec_load(&img_bin_extra_lines[i+2][j+1]);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
and0 = vAND3(aa2, b2, cc2);
y = vAND3(and0, and1, and2);
vec_store(&m[i+1][j], y);
a2 = vec_load(&img_bin_extra_lines[i+3][j-1]);
b2 = vec_load(&img_bin_extra_lines[i+3][j+0]);
c2 = vec_load(&img_bin_extra_lines[i+3][j+1]);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
and1 = vAND3(aa2, b2, cc2);
y = vAND3(and0, and1, and2);
vec_store(&m[i+2][j], y);
}
// EPILOGUE
switch (n) {
case 2:
a2 = vec_load(&img_bin_extra_lines[height-1][j-1]);
b2 = vec_load(&img_bin_extra_lines[height-1][j+0]);
c2 = vec_load(&img_bin_extra_lines[height-1][j+1]);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
and2 = vAND3(aa2, b2, cc2);
y = vAND3(and0, and1, and2);
vec_store(&m[height-2][j], y);
a2 = vec_load(&img_bin_extra_lines[height][j-1]);
b2 = vec_load(&img_bin_extra_lines[height][j+0]);
c2 = vec_load(&img_bin_extra_lines[height][j+1]);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
and0 = vAND3(aa2, b2, cc2);
y = vAND3(and0, and1, and2);
vec_store(&m[height-1][j], y);
break;
case 1:
a2 = vec_load(&img_bin_extra_lines[height][j-1]);
b2 = vec_load(&img_bin_extra_lines[height][j+0]);
c2 = vec_load(&img_bin_extra_lines[height][j+1]);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
and2 = vAND3(aa2, b2, cc2);
y = vAND3(and0, and1, and2);
vec_store(&m[height-1][j], y);
break;
default:
break;
}
a0 = b0; b0 = c0;
a1 = b1; b1 = c1;
}
// Gestion des bords (première et dernière colonnes)
// PROLOGUE LAST COL
b0_last = b0;
b1_last = b1;
a0_last = a0;
a1_last = a1;
c0_last = _mm_bitshift_left(b0_last, 127-nb_unused_col);
c1_last = _mm_bitshift_left(b1_last, 127-nb_unused_col);
aa0_last = vec_left1_bin(a0_last, b0_last);
cc0_last = vec_right1_bin_unused_col(b0_last, c0_last, nb_unused_col);
aa1_last = vec_left1_bin(a1_last, b1_last);
cc1_last = vec_right1_bin_unused_col(b1_last, c1_last, nb_unused_col);
and0_last = vAND3(aa0_last, b0_last, cc0_last);
and1_last = vAND3(aa1_last, b1_last, cc1_last);
// PROLOGUE FIRST COL
b0 = vec_load(&img_bin_extra_lines[-1][0]);
c0 = vec_load(&img_bin_extra_lines[-1][1]);
a0 = _mm_bitshift_right(b0, 127);
b1 = vec_load(&img_bin_extra_lines[0][0]);
c1 = vec_load(&img_bin_extra_lines[0][1]);
a1 = _mm_bitshift_right(b1, 127);
aa0 = vec_left1_bin(a0, b0);
cc0 = vec_right1_bin(b0, c0);
aa1 = vec_left1_bin(a1, b1);
cc1 = vec_right1_bin(b1, c1);
and0 = vAND3(aa0, b0, cc0);
and1 = vAND3(aa1, b1, cc1);
for(int i = 0; i < height-n; i+=3){
// Déroulage de boucle i+0
b2 = vec_load(&img_bin_extra_lines[i+1][0]);
c2 = vec_load(&img_bin_extra_lines[i+1][1]);
a2 = _mm_bitshift_right(b2, 127);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
and2 = vAND3(aa2, b2, cc2);
y = vAND3(and0, and1, and2);
vec_store(&m[i][0], y);
b2_last = vec_load(&img_bin_extra_lines[i+1][nb_vbits_col-1]);
a2_last = vec_load(&img_bin_extra_lines[i+1][nb_vbits_col-2]);
c2_last = _mm_bitshift_left(b2_last, 127-nb_unused_col);
aa2_last = vec_left1_bin(a2_last,b2_last);
cc2_last = vec_right1_bin_unused_col(b2_last,c2_last,nb_unused_col);
and2_last = vAND3(aa2_last, b2_last, cc2_last);
y_last = vAND3(and0_last, and1_last, and2_last);
vec_store(&m[i][nb_vbits_col-1], y_last);
// Déroulage de boucle i+1
b2 = vec_load(&img_bin_extra_lines[i+2][0]);
c2 = vec_load(&img_bin_extra_lines[i+2][1]);
a2 = _mm_bitshift_right(b2, 127);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
and0 = vAND3(aa2, b2, cc2);
y = vAND3(and0, and1, and2);
vec_store(&m[i+1][0], y);
b2_last = vec_load(&img_bin_extra_lines[i+2][nb_vbits_col-1]);
a2_last = vec_load(&img_bin_extra_lines[i+2][nb_vbits_col-2]);
c2_last = _mm_bitshift_left(b2_last, 127-nb_unused_col);
aa2_last = vec_left1_bin(a2_last,b2_last);
cc2_last = vec_right1_bin_unused_col(b2_last,c2_last,nb_unused_col);
and0_last = vAND3(aa2_last, b2_last, cc2_last);
y_last = vAND3(and0_last, and1_last, and2_last);
vec_store(&m[i+1][nb_vbits_col-1], y_last);
// Déroulage de boucle i+2
b2 = vec_load(&img_bin_extra_lines[i+3][0]);
c2 = vec_load(&img_bin_extra_lines[i+3][1]);
a2 = _mm_bitshift_right(b2, 127);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
and1 = vAND3(aa2, b2, cc2);
y = vAND3(and0, and1, and2);
vec_store(&m[i+2][0], y);
b2_last = vec_load(&img_bin_extra_lines[i+3][nb_vbits_col-1]);
a2_last = vec_load(&img_bin_extra_lines[i+3][nb_vbits_col-2]);
c2_last = _mm_bitshift_left(b2_last, 127-nb_unused_col);
aa2_last = vec_left1_bin(a2_last,b2_last);
cc2_last = vec_right1_bin_unused_col(b2_last,c2_last,nb_unused_col);
and1_last = vAND3(aa2_last, b2_last, cc2_last);
y_last = vAND3(and0_last, and1_last, and2_last);
vec_store(&m[i+2][nb_vbits_col-1], y_last);
}
// EPILOGUE
switch (n) {
case 2:
b2 = vec_load(&img_bin_extra_lines[height-1][0]);
c2 = vec_load(&img_bin_extra_lines[height-1][1]);
a2 = _mm_bitshift_right(b2, 127);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
and2 = vAND3(aa2, b2, cc2);
y = vAND3(and0, and1, and2);
vec_store(&m[height-2][0], y);
b2_last = vec_load(&img_bin_extra_lines[height-1][nb_vbits_col-1]);
a2_last = vec_load(&img_bin_extra_lines[height-1][nb_vbits_col-2]);
c2_last = _mm_bitshift_left(b2_last, 127-nb_unused_col);
aa2_last = vec_left1_bin(a2_last,b2_last);
cc2_last = vec_right1_bin_unused_col(b2_last,c2_last,nb_unused_col);
and2_last = vAND3(aa2_last, b2_last, cc2_last);
y_last = vAND3(and0_last, and1_last, and2_last);
vec_store(&m[height-2][nb_vbits_col-1], y_last);
b2 = vec_load(&img_bin_extra_lines[height][0]);
c2 = vec_load(&img_bin_extra_lines[height][1]);
a2 = _mm_bitshift_right(b2, 127);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
and0 = vAND3(aa2, b2, cc2);
y = vAND3(and0, and1, and2);
vec_store(&m[height-1][0], y);
b2_last = vec_load(&img_bin_extra_lines[height][nb_vbits_col-1]);
a2_last = vec_load(&img_bin_extra_lines[height][nb_vbits_col-2]);
c2_last = _mm_bitshift_left(b2_last, 127-nb_unused_col);
aa2_last = vec_left1_bin(a2_last,b2_last);
cc2_last = vec_right1_bin_unused_col(b2_last,c2_last,nb_unused_col);
and0_last = vAND3(aa2_last, b2_last, cc2_last);
y_last = vAND3(and0_last, and1_last, and2_last);
vec_store(&m[height-1][nb_vbits_col-1], y_last);
break;
case 1:
b2 = vec_load(&img_bin_extra_lines[height][0]);
c2 = vec_load(&img_bin_extra_lines[height][1]);
a2 = _mm_bitshift_right(b2, 127);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
and2 = vAND3(aa2, b2, cc2);
y = vAND3(and0, and1, and2);
vec_store(&m[height-1][0], y);
b2_last = vec_load(&img_bin_extra_lines[height][nb_vbits_col-1]);
a2_last = vec_load(&img_bin_extra_lines[height][nb_vbits_col-2]);
c2_last = _mm_bitshift_left(b2_last, 127-nb_unused_col);
aa2_last = vec_left1_bin(a2_last,b2_last);
cc2_last = vec_right1_bin_unused_col(b2_last,c2_last,nb_unused_col);
and2_last = vAND3(aa2_last, b2_last, cc2_last);
y_last = vAND3(and0_last, and1_last, and2_last);
vec_store(&m[height-1][nb_vbits_col-1], y_last);
break;
default:
break;
}
_mm_free(img_bin_extra_lines-1);
return m;
}
// Runs the optimized SIMD 3x3 erosion over the PGM sequence
// "<basePath>001.pgm" .. "<basePath>199.pgm".
//
// basePath: path prefix of the input frames (3-digit frame number and
//           ".pgm" are appended).
// save:     when non-zero, each eroded frame is written to
//           output/erosion_3x3_SIMD_opti/.
//
// Each frame is loaded, converted to the bit-packed representation,
// eroded, converted back, optionally saved, and all per-frame buffers
// are released before the next iteration.
void compute_all_erosion_3x3_SIMD_opti(char* basePath, int save)
{
CHECK_ERROR(system("mkdir -p output/erosion_3x3_SIMD_opti"));
// snprintf + a larger buffer: the original sprintf into char[60] could
// overflow for a long basePath (undefined behavior).
char buff[256];
int nrl, nrh, ncl, nch;
size_t height, width;
uint8 **img, **output;
vbits **img_bin, **output_bin;
for(int i = 1; i < 200; i++){
snprintf(buff, sizeof(buff), "%s%.3d.pgm", basePath, i);
img = LoadPGM_ui8matrix(buff, &nrl, &nrh, &ncl, &nch);
height = nrh-nrl+1;
width = nch-ncl+1;
img_bin = convert_to_binary(img, height, width);
output_bin = erosion_3x3_SIMD_opti(img_bin, height, width);
output = convert_from_binary(output_bin, height, width);
if(save){
snprintf(buff, sizeof(buff), "output/erosion_3x3_SIMD_opti/ero3%.3d.pgm", i);
SavePGM_ui8matrix(output, nrl, nrh, ncl, nch, buff);
}
// Per-frame cleanup: both pixel matrices and both bit-packed matrices.
free_ui8matrix(img, nrl, nrh, ncl, nch);
free_ui8matrix(output, nrl, nrh, ncl, nch);
free_vbitsmatrix(img_bin, height, width);
free_vbitsmatrix(output_bin, height, width);
}
}
// Logical OR over a 3x3 neighbourhood: binary dilation of a bit-packed image.
//
// img_bin: input image, height rows of nb_vbits_col vector words (each word
//          presumably packs 128 pixels — shifts use 127 and columns are
//          counted in units of width/128; confirm against the vbits typedef).
// height/width: image dimensions in pixels.
// Returns a newly allocated bit-packed matrix holding the dilated image; the
// caller owns it.
//
// Structure: the row loop is unrolled by 3. Each row's horizontal 3-wide OR
// is computed exactly once and kept in one of or0/or1/or2 (plus *_last /
// *_bis variants for the edge columns); the three registers rotate roles
// across the unrolled body, so every output row is the vertical OR of three
// precomputed horizontal ORs. n = height%3 leftover rows are handled in the
// switch epilogues.
vbits ** dilatation_3x3_SIMD_opti(vbits** img_bin, int height, int width)
{
int nb_vbits_col = ceil((float)width/128);
// Number of padding bits in the last vector word of each row (0 when the
// width is an exact multiple of 128).
int nb_unused_col = (128-(width%128))%128;
vbits **m = (vbits**)vui32matrix(0, height-1, 0, nb_vbits_col-1);
// Row-pointer table with virtual rows -1 and height aliasing rows 0 and
// height-1, so the vertical neighbourhood needs no special-casing at the
// top/bottom borders (border replication).
vbits **img_bin_extra_lines=(vbits **) _mm_malloc ((size_t)((height+2)*sizeof(vbits*)), 16);
if (!img_bin_extra_lines) vnrerror("allocation failure in dilatation_3x3_SIMD_optiS()");
img_bin_extra_lines++;
for(int i = 0; i < height; i++)
img_bin_extra_lines[i] = img_bin[i];
img_bin_extra_lines[-1] = img_bin_extra_lines[0];
img_bin_extra_lines[height] = img_bin_extra_lines[height-1];
// a/b/c: left/current/right words of one row; aa/cc: row shifted by one
// pixel left/right; or{0,1,2}: horizontal ORs of three consecutive rows.
// The *_last variants serve the rightmost word of each row.
vbits a0, b0, c0, a0_last, b0_last, c0_last;
vbits a1, b1, c1, a1_last, b1_last, c1_last;;
vbits a2, b2, c2, a2_last, b2_last, c2_last;;
vbits aa0, cc0, aa0_last, cc0_last;
vbits aa1, cc1, aa1_last, cc1_last;
vbits aa2, cc2, aa2_last, cc2_last;
vbits or0, or1, or2, or0_last, or1_last, or2_last;
vbits y, y_last;
// Leftover rows after the 3-way unrolled loop.
int n = height%3;
// Special case: images with fewer than 128 columns — a single vector word
// per row, so both horizontal borders live in the same word.
if(nb_vbits_col == 1){
// PROLOGUE: horizontal ORs of virtual row -1 (== row 0) and row 0.
b0 = vec_load(&img_bin_extra_lines[-1][0]);
b1 = vec_load(&img_bin_extra_lines[0][0]);
a0 = _mm_bitshift_right(b0, 127);
a1 = _mm_bitshift_right(b1, 127);
c0 = _mm_bitshift_left(b0, 127-nb_unused_col);
c1 = _mm_bitshift_left(b1, 127-nb_unused_col);
aa0 = vec_left1_bin(a0, b0);
cc0 = vec_right1_bin_unused_col(b0, c0, nb_unused_col);
aa1 = vec_left1_bin(a1, b1);
cc1 = vec_right1_bin_unused_col(b1, c1, nb_unused_col);
or0 = vOR3(aa0, b0, cc0);
or1 = vOR3(aa1, b1, cc1);
// Steady state: per iteration, compute one new row OR and rotate which of
// or0/or1/or2 it replaces.
for(int i=0; i < height-n; i+=3){
b2 = vec_load(&img_bin_extra_lines[i+1][0]);
a2 = _mm_bitshift_right(b2, 127);
c2 = _mm_bitshift_left(b2, 127-nb_unused_col);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin_unused_col(b2,c2,nb_unused_col);
or2 = vOR3(aa2, b2, cc2);
y = vOR3(or0, or1, or2);
vec_store(&m[i][0], y);
b2 = vec_load(&img_bin_extra_lines[i+2][0]);
a2 = _mm_bitshift_right(b2, 127);
c2 = _mm_bitshift_left(b2, 127-nb_unused_col);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin_unused_col(b2,c2,nb_unused_col);
or0 = vOR3(aa2, b2, cc2);
y = vOR3(or0, or1, or2);
vec_store(&m[i+1][0], y);
b2 = vec_load(&img_bin_extra_lines[i+3][0]);
a2 = _mm_bitshift_right(b2, 127);
c2 = _mm_bitshift_left(b2, 127-nb_unused_col);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin_unused_col(b2,c2,nb_unused_col);
or1 = vOR3(aa2, b2, cc2);
y = vOR3(or0, or1, or2);
vec_store(&m[i+2][0], y);
}
// EPILOGUE: finish the 1 or 2 rows not covered by the unrolled loop (the
// last source row read is the virtual row `height`, i.e. row height-1).
switch (n) {
case 2:
b2 = vec_load(&img_bin_extra_lines[height-1][0]);
a2 = _mm_bitshift_right(b2, 127);
c2 = _mm_bitshift_left(b2, 127-nb_unused_col);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin_unused_col(b2,c2,nb_unused_col);
or2 = vOR3(aa2, b2, cc2);
y = vOR3(or0, or1, or2);
vec_store(&m[height-2][0], y);
b2 = vec_load(&img_bin_extra_lines[height][0]);
a2 = _mm_bitshift_right(b2, 127);
c2 = _mm_bitshift_left(b2, 127-nb_unused_col);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin_unused_col(b2,c2,nb_unused_col);
or0 = vOR3(aa2, b2, cc2);
y = vOR3(or0, or1, or2);
vec_store(&m[height-1][0], y);
break;
case 1:
b2 = vec_load(&img_bin_extra_lines[height][0]);
a2 = _mm_bitshift_right(b2, 127);
c2 = _mm_bitshift_left(b2, 127-nb_unused_col);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin_unused_col(b2,c2,nb_unused_col);
or2 = vOR3(aa2, b2, cc2);
y = vOR3(or0, or1, or2);
vec_store(&m[height-1][0], y);
break;
default:
break;
}
// Undo the ++ done after allocation before freeing.
_mm_free(img_bin_extra_lines-1);
return m;
}
// Standard case: interior vector words (columns 1 .. nb_vbits_col-2), where
// both horizontal neighbours come from adjacent words of the same row.
a0 = vec_load(&img_bin_extra_lines[-1][0]);
b0 = vec_load(&img_bin_extra_lines[-1][1]);
a1 = vec_load(&img_bin_extra_lines[0][0]);
b1 = vec_load(&img_bin_extra_lines[0][1]);
for(int j = 1; j < nb_vbits_col-1; j++){
// PROLOGUE for column j (virtual row -1 and row 0).
c0 = vec_load(&img_bin_extra_lines[-1][j+1]);
c1 = vec_load(&img_bin_extra_lines[0][j+1]);
aa0 = vec_left1_bin(a0, b0);
cc0 = vec_right1_bin(b0, c0);
aa1 = vec_left1_bin(a1, b1);
cc1 = vec_right1_bin(b1, c1);
or0 = vOR3(aa0, b0, cc0);
or1 = vOR3(aa1, b1, cc1);
for(int i = 0; i < height-n; i+=3){
a2 = vec_load(&img_bin_extra_lines[i+1][j-1]);
b2 = vec_load(&img_bin_extra_lines[i+1][j+0]);
c2 = vec_load(&img_bin_extra_lines[i+1][j+1]);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
or2 = vOR3(aa2, b2, cc2);
y = vOR3(or0, or1, or2);
vec_store(&m[i][j], y);
a2 = vec_load(&img_bin_extra_lines[i+2][j-1]);
b2 = vec_load(&img_bin_extra_lines[i+2][j+0]);
c2 = vec_load(&img_bin_extra_lines[i+2][j+1]);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
or0 = vOR3(aa2, b2, cc2);
y = vOR3(or0, or1, or2);
vec_store(&m[i+1][j], y);
a2 = vec_load(&img_bin_extra_lines[i+3][j-1]);
b2 = vec_load(&img_bin_extra_lines[i+3][j+0]);
c2 = vec_load(&img_bin_extra_lines[i+3][j+1]);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
or1 = vOR3(aa2, b2, cc2);
y = vOR3(or0, or1, or2);
vec_store(&m[i+2][j], y);
}
// EPILOGUE for column j (leftover 1 or 2 rows).
switch (n) {
case 2:
a2 = vec_load(&img_bin_extra_lines[height-1][j-1]);
b2 = vec_load(&img_bin_extra_lines[height-1][j+0]);
c2 = vec_load(&img_bin_extra_lines[height-1][j+1]);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
or2 = vOR3(aa2, b2, cc2);
y = vOR3(or0, or1, or2);
vec_store(&m[height-2][j], y);
a2 = vec_load(&img_bin_extra_lines[height][j-1]);
b2 = vec_load(&img_bin_extra_lines[height][j+0]);
c2 = vec_load(&img_bin_extra_lines[height][j+1]);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
or0 = vOR3(aa2, b2, cc2);
y = vOR3(or0, or1, or2);
vec_store(&m[height-1][j], y);
break;
case 1:
a2 = vec_load(&img_bin_extra_lines[height][j-1]);
b2 = vec_load(&img_bin_extra_lines[height][j+0]);
c2 = vec_load(&img_bin_extra_lines[height][j+1]);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
or2 = vOR3(aa2, b2, cc2);
y = vOR3(or0, or1, or2);
vec_store(&m[height-1][j], y);
break;
default:
break;
}
// Slide the word window one column to the right for the next j.
a0 = b0; b0 = c0;
a1 = b1; b1 = c1;
}
// Border columns (first and last vector word of each row), processed in the
// same 3-way-unrolled fashion; both are handled in a single row loop.
// PROLOGUE LAST COL — reuses the words left by the j-loop above.
b0_last = b0;
b1_last = b1;
a0_last = a0;
a1_last = a1;
c0_last = _mm_bitshift_left(b0_last, 127-nb_unused_col);
c1_last = _mm_bitshift_left(b1_last, 127-nb_unused_col);
aa0_last = vec_left1_bin(a0_last, b0_last);
cc0_last = vec_right1_bin_unused_col(b0_last, c0_last, nb_unused_col);
aa1_last = vec_left1_bin(a1_last, b1_last);
cc1_last = vec_right1_bin_unused_col(b1_last, c1_last, nb_unused_col);
or0_last = vOR3(aa0_last, b0_last, cc0_last);
or1_last = vOR3(aa1_last, b1_last, cc1_last);
// PROLOGUE FIRST COL — the left neighbour comes from shifting the word
// itself (no word to the left).
b0 = vec_load(&img_bin_extra_lines[-1][0]);
c0 = vec_load(&img_bin_extra_lines[-1][1]);
a0 = _mm_bitshift_right(b0, 127);
b1 = vec_load(&img_bin_extra_lines[0][0]);
c1 = vec_load(&img_bin_extra_lines[0][1]);
a1 = _mm_bitshift_right(b1, 127);
aa0 = vec_left1_bin(a0, b0);
cc0 = vec_right1_bin(b0, c0);
aa1 = vec_left1_bin(a1, b1);
cc1 = vec_right1_bin(b1, c1);
or0 = vOR3(aa0, b0, cc0);
or1 = vOR3(aa1, b1, cc1);
for(int i = 0; i < height-n; i+=3){
// Loop unrolling, step i+0
b2 = vec_load(&img_bin_extra_lines[i+1][0]);
c2 = vec_load(&img_bin_extra_lines[i+1][1]);
a2 = _mm_bitshift_right(b2, 127);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
or2 = vOR3(aa2, b2, cc2);
y = vOR3(or0, or1, or2);
vec_store(&m[i][0], y);
b2_last = vec_load(&img_bin_extra_lines[i+1][nb_vbits_col-1]);
a2_last = vec_load(&img_bin_extra_lines[i+1][nb_vbits_col-2]);
c2_last = _mm_bitshift_left(b2_last, 127-nb_unused_col);
aa2_last = vec_left1_bin(a2_last,b2_last);
cc2_last = vec_right1_bin_unused_col(b2_last,c2_last,nb_unused_col);
or2_last = vOR3(aa2_last, b2_last, cc2_last);
y_last = vOR3(or0_last, or1_last, or2_last);
vec_store(&m[i][nb_vbits_col-1], y_last);
// Loop unrolling, step i+1
b2 = vec_load(&img_bin_extra_lines[i+2][0]);
c2 = vec_load(&img_bin_extra_lines[i+2][1]);
a2 = _mm_bitshift_right(b2, 127);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
or0 = vOR3(aa2, b2, cc2);
y = vOR3(or0, or1, or2);
vec_store(&m[i+1][0], y);
b2_last = vec_load(&img_bin_extra_lines[i+2][nb_vbits_col-1]);
a2_last = vec_load(&img_bin_extra_lines[i+2][nb_vbits_col-2]);
c2_last = _mm_bitshift_left(b2_last, 127-nb_unused_col);
aa2_last = vec_left1_bin(a2_last,b2_last);
cc2_last = vec_right1_bin_unused_col(b2_last,c2_last,nb_unused_col);
or0_last = vOR3(aa2_last, b2_last, cc2_last);
y_last = vOR3(or0_last, or1_last, or2_last);
vec_store(&m[i+1][nb_vbits_col-1], y_last);
// Loop unrolling, step i+2
b2 = vec_load(&img_bin_extra_lines[i+3][0]);
c2 = vec_load(&img_bin_extra_lines[i+3][1]);
a2 = _mm_bitshift_right(b2, 127);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
or1 = vOR3(aa2, b2, cc2);
y = vOR3(or0, or1, or2);
vec_store(&m[i+2][0], y);
b2_last = vec_load(&img_bin_extra_lines[i+3][nb_vbits_col-1]);
a2_last = vec_load(&img_bin_extra_lines[i+3][nb_vbits_col-2]);
c2_last = _mm_bitshift_left(b2_last, 127-nb_unused_col);
aa2_last = vec_left1_bin(a2_last,b2_last);
cc2_last = vec_right1_bin_unused_col(b2_last,c2_last,nb_unused_col);
or1_last = vOR3(aa2_last, b2_last, cc2_last);
y_last = vOR3(or0_last, or1_last, or2_last);
vec_store(&m[i+2][nb_vbits_col-1], y_last);
}
// EPILOGUE for the border columns (leftover 1 or 2 rows).
switch (n) {
case 2:
b2 = vec_load(&img_bin_extra_lines[height-1][0]);
c2 = vec_load(&img_bin_extra_lines[height-1][1]);
a2 = _mm_bitshift_right(b2, 127);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
or2 = vOR3(aa2, b2, cc2);
y = vOR3(or0, or1, or2);
vec_store(&m[height-2][0], y);
b2_last = vec_load(&img_bin_extra_lines[height-1][nb_vbits_col-1]);
a2_last = vec_load(&img_bin_extra_lines[height-1][nb_vbits_col-2]);
c2_last = _mm_bitshift_left(b2_last, 127-nb_unused_col);
aa2_last = vec_left1_bin(a2_last,b2_last);
cc2_last = vec_right1_bin_unused_col(b2_last,c2_last,nb_unused_col);
or2_last = vOR3(aa2_last, b2_last, cc2_last);
y_last = vOR3(or0_last, or1_last, or2_last);
vec_store(&m[height-2][nb_vbits_col-1], y_last);
b2 = vec_load(&img_bin_extra_lines[height][0]);
c2 = vec_load(&img_bin_extra_lines[height][1]);
a2 = _mm_bitshift_right(b2, 127);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
or0 = vOR3(aa2, b2, cc2);
y = vOR3(or0, or1, or2);
vec_store(&m[height-1][0], y);
b2_last = vec_load(&img_bin_extra_lines[height][nb_vbits_col-1]);
a2_last = vec_load(&img_bin_extra_lines[height][nb_vbits_col-2]);
c2_last = _mm_bitshift_left(b2_last, 127-nb_unused_col);
aa2_last = vec_left1_bin(a2_last,b2_last);
cc2_last = vec_right1_bin_unused_col(b2_last,c2_last,nb_unused_col);
or0_last = vOR3(aa2_last, b2_last, cc2_last);
y_last = vOR3(or0_last, or1_last, or2_last);
vec_store(&m[height-1][nb_vbits_col-1], y_last);
break;
case 1:
b2 = vec_load(&img_bin_extra_lines[height][0]);
c2 = vec_load(&img_bin_extra_lines[height][1]);
a2 = _mm_bitshift_right(b2, 127);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
or2 = vOR3(aa2, b2, cc2);
y = vOR3(or0, or1, or2);
vec_store(&m[height-1][0], y);
b2_last = vec_load(&img_bin_extra_lines[height][nb_vbits_col-1]);
a2_last = vec_load(&img_bin_extra_lines[height][nb_vbits_col-2]);
c2_last = _mm_bitshift_left(b2_last, 127-nb_unused_col);
aa2_last = vec_left1_bin(a2_last,b2_last);
cc2_last = vec_right1_bin_unused_col(b2_last,c2_last,nb_unused_col);
or2_last = vOR3(aa2_last, b2_last, cc2_last);
y_last = vOR3(or0_last, or1_last, or2_last);
vec_store(&m[height-1][nb_vbits_col-1], y_last);
break;
default:
break;
}
// Undo the ++ done after allocation before freeing.
_mm_free(img_bin_extra_lines-1);
return m;
}
// Runs the optimized SIMD 3x3 dilation over the PGM sequence
// "<basePath>001.pgm" .. "<basePath>199.pgm".
//
// basePath: path prefix of the input frames (3-digit frame number and
//           ".pgm" are appended).
// save:     when non-zero, each dilated frame is written to
//           output/dilatation_3x3_SIMD_opti/.
//
// Each frame is loaded, converted to the bit-packed representation,
// dilated, converted back, optionally saved, and all per-frame buffers
// are released before the next iteration.
void compute_all_dilatation_3x3_SIMD_opti(char* basePath, int save)
{
CHECK_ERROR(system("mkdir -p output/dilatation_3x3_SIMD_opti"));
// snprintf + a larger buffer: the original sprintf into char[60] could
// overflow for a long basePath (undefined behavior).
char buff[256];
int nrl, nrh, ncl, nch;
size_t height, width;
uint8 **img, **output;
vbits **img_bin, **output_bin;
for(int i = 1; i < 200; i++){
snprintf(buff, sizeof(buff), "%s%.3d.pgm", basePath, i);
img = LoadPGM_ui8matrix(buff, &nrl, &nrh, &ncl, &nch);
height = nrh-nrl+1;
width = nch-ncl+1;
img_bin = convert_to_binary(img, height, width);
output_bin = dilatation_3x3_SIMD_opti(img_bin, height, width);
output = convert_from_binary(output_bin, height, width);
if(save){
snprintf(buff, sizeof(buff), "output/dilatation_3x3_SIMD_opti/dil3%.3d.pgm", i);
SavePGM_ui8matrix(output, nrl, nrh, ncl, nch, buff);
}
// Per-frame cleanup: both pixel matrices and both bit-packed matrices.
free_ui8matrix(img, nrl, nrh, ncl, nch);
free_ui8matrix(output, nrl, nrh, ncl, nch);
free_vbitsmatrix(img_bin, height, width);
free_vbitsmatrix(output_bin, height, width);
}
}
// Morphological opening (naive SIMD variants): 3x3 erosion followed by a
// 3x3 dilation. Returns a newly allocated bit-packed matrix owned by the
// caller; the intermediate eroded image is released internally.
vbits** ouverture_SIMD_naif(vbits** img_bin, int height, int width)
{
    vbits** eroded = erosion_3x3_SIMD_naif(img_bin, height, width);
    vbits** opened = dilatation_3x3_SIMD_naif(eroded, height, width);
    // The eroded intermediate is no longer needed once dilated.
    free_vbitsmatrix(eroded, height, width);
    return opened;
}
// Morphological closing (naive SIMD variants): 3x3 dilation followed by a
// 3x3 erosion. Returns a newly allocated bit-packed matrix owned by the
// caller; the intermediate dilated image is released internally.
vbits** fermeture_SIMD_naif(vbits** img_bin, int height, int width)
{
    vbits** dilated = dilatation_3x3_SIMD_naif(img_bin, height, width);
    vbits** closed = erosion_3x3_SIMD_naif(dilated, height, width);
    // The dilated intermediate is no longer needed once eroded.
    free_vbitsmatrix(dilated, height, width);
    return closed;
}
// Full morphological chain (naive SIMD variants): opening then closing.
// Returns a newly allocated bit-packed matrix; the intermediate opening
// result is released internally.
vbits** chaine_complete_SIMD_naif(vbits** img_bin, int height, int width)
{
vbits ** ouverture = ouverture_SIMD_naif(img_bin, height, width);
vbits ** fermeture = fermeture_SIMD_naif(ouverture, height, width);
free_vbitsmatrix(ouverture, height, width);
#ifdef BENCH
// NOTE(review): in BENCH builds the result is freed here and the now-
// dangling pointer is returned — callers must neither dereference nor
// free the return value in that configuration. Presumably intentional to
// avoid leaks when benchmarks discard the result; confirm against the
// benchmark harness.
free_vbitsmatrix(fermeture, height, width);
#endif
return fermeture;
}
// Morphological opening, optimized pipeline: dilatation( erosion(img) ).
// Returns a newly allocated bit-packed matrix owned by the caller; the
// intermediate eroded image is released internally.
vbits ** ouverture_opti_SIMD(vbits** img_bin, int height, int width)
{
    vbits** eroded = erosion_3x3_SIMD_opti(img_bin, height, width);
    vbits** opened = dilatation_3x3_SIMD_opti(eroded, height, width);
    // Drop the eroded intermediate once the dilation has consumed it.
    free_vbitsmatrix(eroded, height, width);
    return opened;
}
// Morphological closing, optimized pipeline: erosion( dilatation(img) ).
// Returns a newly allocated bit-packed matrix owned by the caller; the
// intermediate dilated image is released internally.
vbits ** fermeture_opti_SIMD(vbits** img_bin, int height, int width)
{
    vbits** dilated = dilatation_3x3_SIMD_opti(img_bin, height, width);
    vbits** closed = erosion_3x3_SIMD_opti(dilated, height, width);
    // Drop the dilated intermediate once the erosion has consumed it.
    free_vbitsmatrix(dilated, height, width);
    return closed;
}
vbits ** ouverture_fusion_SIMD(vbits** img_bin, int height, int width)
{
int nb_vbits_col = ceil((float)width/128);
int nb_unused_col = (128-(width%128))%128;
int n = height%3;
vbits **m = (vbits**)vui32matrix(0, height-1, 0, nb_vbits_col-1);
// Ajout d'un pointeur vers la ligne height-1 de l'image pour gérer plus facilement les bords
vbits **img_bin_extra_lines=(vbits **) _mm_malloc ((size_t)((height+1)*sizeof(vbits*)), 16);
if (!img_bin_extra_lines) vnrerror("allocation failure in ouverture_fusion_SIMD()");
for(int i = 0; i < height; i++)
img_bin_extra_lines[i] = img_bin[i];
img_bin_extra_lines[height] = img_bin_extra_lines[height-1];
vbits a0, a1, a2;
vbits b0, b1, b2;
vbits c0, c1, c2;
vbits aa0, aa1, aa2;
vbits cc0, cc1, cc2;
vbits or0, or1, or2, or0_bis, or1_bis, or2_bis, or0_ter, or1_ter, or2_ter;
vbits and0, and1, and2, and3, and4, and5, and6, and7, and8;
vbits and0_0, and0_1, and0_2, and1_0, and1_1, and1_2, and2_0, and2_1, and2_2;
vbits y;
// CAS DES IMAGES AYANT MOINS DE 128 COLONNES
if(nb_vbits_col == 1){
b0 = vec_load(&img_bin_extra_lines[0][0]);
b1 = vec_load(&img_bin_extra_lines[1][0]);
a0 = _mm_bitshift_right(b0, 127);
a1 = _mm_bitshift_right(b1, 127);
c0 = _mm_bitshift_left(b0, 127-nb_unused_col);
c1 = _mm_bitshift_left(b1, 127-nb_unused_col);
aa0 = vec_left1_bin(a0,b0);
aa1 = vec_left1_bin(a1,b1);
cc0 = vec_right1_bin(b0,c0);
cc1 = vec_right1_bin(b1,c1);
and0_0 = vAND3(aa0, b0, cc0);
and0_1 = vAND3(aa1, b1, cc1);
and0 = vAND3(and0_0, and0_0, and0_1);
and3 = and0;
aa0 = vec_left1_bin(_mm_bitshift_right(and0, 127),and0);
cc0 = vec_right1_bin_unused_col(and0, _mm_bitshift_left(and0, 127-nb_unused_col),nb_unused_col);
or0 = vOR3(aa0, and0, cc0);
or1 = or0;
for(int i = 0; i < height-3; i+=3){
b2 = vec_load(&img_bin_extra_lines[i+2][0]);
a2 = _mm_bitshift_right(b2, 127);
c2 = _mm_bitshift_left(b2, 127-nb_unused_col);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin_unused_col(b2,c2,nb_unused_col);
and0_2 = vAND3(aa2, b2, cc2);
and6 = vAND3(and0_2, and0_1, and0_0);
aa0 = vec_left1_bin(_mm_bitshift_right(and6, 127),and6);
cc0 = vec_right1_bin_unused_col(and6, _mm_bitshift_left(and6, 127-nb_unused_col),nb_unused_col);
or2 = vOR3(aa0, and6, cc0);
y = vOR3(or0, or1, or2);
vec_store(&m[i][0], y);
b2 = vec_load(&img_bin_extra_lines[i+3][0]);
a2 = _mm_bitshift_right(b2, 127);
c2 = _mm_bitshift_left(b2, 127-nb_unused_col);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin_unused_col(b2,c2,nb_unused_col);
and0_0 = vAND3(aa2, b2, cc2);
and0 = vAND3(and0_0, and0_2, and0_1);
aa0 = vec_left1_bin(_mm_bitshift_right(and0, 127),and0);
cc0 = vec_right1_bin_unused_col(and0, _mm_bitshift_left(and0, 127-nb_unused_col),nb_unused_col);
or0 = vOR3(aa0, and0, cc0);
y = vOR3(or0, or2, or1);
vec_store(&m[i+1][0], y);
b2 = vec_load(&img_bin_extra_lines[i+4][0]);
a2 = _mm_bitshift_right(b2, 127);
c2 = _mm_bitshift_left(b2, 127-nb_unused_col);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin_unused_col(b2,c2,nb_unused_col);
and0_1 = vAND3(aa2, b2, cc2);
and3 = vAND3(and0_1, and0_0, and0_2);
aa0 = vec_left1_bin(_mm_bitshift_right(and3, 127),and3);
cc0 = vec_right1_bin_unused_col(and3, _mm_bitshift_left(and3, 127-nb_unused_col),nb_unused_col);
or1 = vOR3(aa0, and3, cc0);
y = vOR3(or1, or0, or2);
vec_store(&m[i+2][0], y);
}
switch(n){
case 2 :
and6 = vAND3(and0_1, and0_1, and0_0);
aa0 = vec_left1_bin(_mm_bitshift_right(and6, 127),and6);
cc0 = vec_right1_bin_unused_col(and6, _mm_bitshift_left(and6, 127-nb_unused_col),nb_unused_col);
or2 = vOR3(aa0, and6, cc0);
y = vOR3(or0, or1, or2);
vec_store(&m[height-2][0], y);
y = vOR3(or1, or2, or2);
vec_store(&m[height-1][0], y);
break;
case 1 :
y = vOR3(or0, or1, or1);
vec_store(&m[height-1][0], y);
break;
case 0 :
b2 = vec_load(&img_bin_extra_lines[height-1][0]);
a2 = _mm_bitshift_right(b2, 127);
c2 = _mm_bitshift_left(b2, 127-nb_unused_col);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin_unused_col(b2,c2,nb_unused_col);
and0_2 = vAND3(aa2, b2, cc2);
and6 = vAND3(and0_2, and0_1, and0_0);
aa0 = vec_left1_bin(_mm_bitshift_right(and6, 127),and6);
cc0 = vec_right1_bin_unused_col(and6, _mm_bitshift_left(and6, 127-nb_unused_col),nb_unused_col);
or2 = vOR3(aa0, and6, cc0);
y = vOR3(or0, or1, or2);
vec_store(&m[height-3][0], y);
and0 = vAND3(and0_2, and0_2, and0_1);
aa0 = vec_left1_bin(_mm_bitshift_right(and0, 127),and0);
cc0 = vec_right1_bin_unused_col(and0, _mm_bitshift_left(and0, 127-nb_unused_col),nb_unused_col);
or0 = vOR3(aa0, and0, cc0);
y = vOR3(or0, or2, or1);
vec_store(&m[height-2][0], y);
y = vOR3(or2, or0, or0);
vec_store(&m[height-1][0], y);
break;
}
_mm_free(img_bin_extra_lines);
return m;
}
// CAS DES IMAGES ENTRE 1*128+1 ET 2*128 COLONNES
if(nb_vbits_col == 2){
b0 = vec_load(&img_bin_extra_lines[0][0]);
b1 = vec_load(&img_bin_extra_lines[1][0]);
c0 = vec_load(&img_bin_extra_lines[0][1]);
c1 = vec_load(&img_bin_extra_lines[1][1]);
a0 = _mm_bitshift_right(b0, 127);
a1 = _mm_bitshift_right(b1, 127);
aa0 = vec_left1_bin(a0,b0);
aa1 = vec_left1_bin(a1,b1);
cc0 = vec_right1_bin(b0,c0);
cc1 = vec_right1_bin(b1,c1);
and0_0 = vAND3(aa0, b0, cc0);
and0_1 = vAND3(aa1, b1, cc1);
and0 = vAND3(and0_0, and0_0, and0_1);
and3 = and0;
a0 = _mm_bitshift_left(c0, 127-nb_unused_col);
a1 = _mm_bitshift_left(c1, 127-nb_unused_col);
aa0 = vec_left1_bin(b0,c0);
aa1 = vec_left1_bin(b1,c1);
cc0 = vec_right1_bin(c0,a0);
cc1 = vec_right1_bin(c1,a1);
and1_0 = vAND3(aa0, c0, cc0);
and1_1 = vAND3(aa1, c1, cc1);
and1 = vAND3(and1_0, and1_0, and1_1);
and4 = and1;
aa0 = vec_left1_bin(_mm_bitshift_right(and0, 127),and0);
cc0 = vec_right1_bin(and0,and1);
or0 = vOR3(aa0, and0, cc0);
aa0 = vec_left1_bin(and0,and1);
cc0 = vec_right1_bin_unused_col(and1, _mm_bitshift_left(and1, 127-nb_unused_col),nb_unused_col);
or0_bis = vOR3(aa0, and1, cc0);
or1 = or0;
or1_bis = or0_bis;
for(int i = 0; i < height-3; i+=3){
b2 = vec_load(&img_bin_extra_lines[i+2][0]);
c2 = vec_load(&img_bin_extra_lines[i+2][1]);
a2 = _mm_bitshift_right(b2, 127);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
and0_2 = vAND3(aa2, b2, cc2);
and6 = vAND3(and0_2, and0_1, and0_0);
a2 = _mm_bitshift_left(c2, 127-nb_unused_col);
aa2 = vec_left1_bin(b2,c2);
cc2 = vec_right1_bin_unused_col(c2,a2,nb_unused_col);
and1_2 = vAND3(aa2, c2, cc2);
and7 = vAND3(and1_2, and1_1, and1_0);
aa0 = vec_left1_bin(_mm_bitshift_right(and6, 127),and6);
cc0 = vec_right1_bin(and6,and7);
or2 = vOR3(aa0, and6, cc0);
aa0 = vec_left1_bin(and6,and7);
cc0 = vec_right1_bin_unused_col(and7, _mm_bitshift_left(and7, 127-nb_unused_col),nb_unused_col);
or2_bis = vOR3(aa0, and7, cc0);
y = vOR3(or2, or0, or1);
vec_store(&m[i][0], y);
y = vOR3(or2_bis, or0_bis, or1_bis);
vec_store(&m[i][1], y);
b2 = vec_load(&img_bin_extra_lines[i+3][0]);
c2 = vec_load(&img_bin_extra_lines[i+3][1]);
a2 = _mm_bitshift_right(b2, 127);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
and0_0 = vAND3(aa2, b2, cc2);
and0 = vAND3(and0_0, and0_2, and0_1);
a2 = _mm_bitshift_left(c2, 127-nb_unused_col);
aa2 = vec_left1_bin(b2,c2);
cc2 = vec_right1_bin_unused_col(c2,a2,nb_unused_col);
and1_0 = vAND3(aa2, c2, cc2);
and1 = vAND3(and1_0, and1_2, and1_1);
aa0 = vec_left1_bin(_mm_bitshift_right(and0, 127),and0);
cc0 = vec_right1_bin(and0,and1);
or0 = vOR3(aa0, and0, cc0);
aa0 = vec_left1_bin(and0,and1);
cc0 = vec_right1_bin_unused_col(and1, _mm_bitshift_left(and1, 127-nb_unused_col),nb_unused_col);
or0_bis = vOR3(aa0, and1, cc0);
y = vOR3(or0, or2, or1);
vec_store(&m[i+1][0], y);
y = vOR3(or0_bis, or2_bis, or1_bis);
vec_store(&m[i+1][1], y);
b2 = vec_load(&img_bin_extra_lines[i+4][0]);
c2 = vec_load(&img_bin_extra_lines[i+4][1]);
a2 = _mm_bitshift_right(b2, 127);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
and0_1 = vAND3(aa2, b2, cc2);
and3 = vAND3(and0_1, and0_0, and0_2);
a2 = _mm_bitshift_left(c2, 127-nb_unused_col);
aa2 = vec_left1_bin(b2,c2);
cc2 = vec_right1_bin_unused_col(c2,a2,nb_unused_col);
and1_1 = vAND3(aa2, c2, cc2);
and4 = vAND3(and1_1, and1_0, and1_2);
aa0 = vec_left1_bin(_mm_bitshift_right(and3, 127),and3);
cc0 = vec_right1_bin(and3,and4);
or1 = vOR3(aa0, and3, cc0);
aa0 = vec_left1_bin(and3,and4);
cc0 = vec_right1_bin_unused_col(and4, _mm_bitshift_left(and4, 127-nb_unused_col),nb_unused_col);
or1_bis = vOR3(aa0, and4, cc0);
y = vOR3(or1, or0, or2);
vec_store(&m[i+2][0], y);
y = vOR3(or1_bis, or0_bis, or2_bis);
vec_store(&m[i+2][1], y);
}
switch(n){
case 2 :
and6 = vAND3(and0_1, and0_1, and0_0);
and7 = vAND3(and1_1, and1_1, and1_0);
aa0 = vec_left1_bin(_mm_bitshift_right(and6, 127),and6);
cc0 = vec_right1_bin(and6,and7);
or2 = vOR3(aa0, and6, cc0);
aa0 = vec_left1_bin(and6,and7);
cc0 = vec_right1_bin_unused_col(and7, _mm_bitshift_left(and7, 127-nb_unused_col),nb_unused_col);
or2_bis = vOR3(aa0, and7, cc0);
y = vOR3(or2, or0, or1);
vec_store(&m[height-2][0], y);
y = vOR3(or2_bis, or0_bis, or1_bis);
vec_store(&m[height-2][1], y);
y = vOR3(or1, or2, or2);
vec_store(&m[height-1][0], y);
y = vOR3(or1_bis, or2_bis, or2_bis);
vec_store(&m[height-1][1], y);
break;
case 1 :
y = vOR3(or0, or1, or1);
vec_store(&m[height-1][0], y);
y = vOR3(or0_bis, or1_bis, or1_bis);
vec_store(&m[height-1][1], y);
break;
case 0 :
b2 = vec_load(&img_bin_extra_lines[height-1][0]);
c2 = vec_load(&img_bin_extra_lines[height-1][1]);
a2 = _mm_bitshift_right(b2, 127);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
and0_2 = vAND3(aa2, b2, cc2);
and6 = vAND3(and0_2, and0_1, and0_0);
a2 = _mm_bitshift_left(c2, 127-nb_unused_col);
aa2 = vec_left1_bin(b2,c2);
cc2 = vec_right1_bin_unused_col(c2,a2,nb_unused_col);
and1_2 = vAND3(aa2, c2, cc2);
and7 = vAND3(and1_2, and1_1, and1_0);
aa0 = vec_left1_bin(_mm_bitshift_right(and6, 127),and6);
cc0 = vec_right1_bin(and6,and7);
or2 = vOR3(aa0, and6, cc0);
aa0 = vec_left1_bin(and6,and7);
cc0 = vec_right1_bin_unused_col(and7, _mm_bitshift_left(and7, 127-nb_unused_col),nb_unused_col);
or2_bis = vOR3(aa0, and7, cc0);
y = vOR3(or2, or0, or1);
vec_store(&m[height-3][0], y);
y = vOR3(or2_bis, or0_bis, or1_bis);
vec_store(&m[height-3][1], y);
and0 = vAND3(and0_2, and0_2, and0_1);
and1 = vAND3(and1_2, and1_2, and1_1);
aa0 = vec_left1_bin(_mm_bitshift_right(and0, 127),and0);
cc0 = vec_right1_bin(and0,and1);
or0 = vOR3(aa0, and0, cc0);
aa0 = vec_left1_bin(and0,and1);
cc0 = vec_right1_bin_unused_col(and1, _mm_bitshift_left(and1, 127-nb_unused_col),nb_unused_col);
or0_bis = vOR3(aa0, and1, cc0);
y = vOR3(or0, or2, or1);
vec_store(&m[height-2][0], y);
y = vOR3(or0_bis, or2_bis, or1_bis);
vec_store(&m[height-2][1], y);
y = vOR3(or2, or0, or0);
vec_store(&m[height-1][0], y);
y = vOR3(or2_bis, or0_bis, or0_bis);
vec_store(&m[height-1][1], y);
break;
default:
break;
}
_mm_free(img_bin_extra_lines);
return m;
}
// Case of images between 2*128+1 and 3*128 columns (nb_vbits_col == 3)
if(nb_vbits_col == 3){
b0 = vec_load(&img_bin_extra_lines[0][0]);
b1 = vec_load(&img_bin_extra_lines[1][0]);
c0 = vec_load(&img_bin_extra_lines[0][1]);
c1 = vec_load(&img_bin_extra_lines[1][1]);
a0 = _mm_bitshift_right(b0, 127);
a1 = _mm_bitshift_right(b1, 127);
aa0 = vec_left1_bin(a0,b0);
aa1 = vec_left1_bin(a1,b1);
cc0 = vec_right1_bin(b0,c0);
cc1 = vec_right1_bin(b1,c1);
and0_0 = vAND3(aa0, b0, cc0);
and0_1 = vAND3(aa1, b1, cc1);
and0 = vAND3(and0_0, and0_0, and0_1);
and3 = and0;
a0 = vec_load(&img_bin_extra_lines[0][2]);
a1 = vec_load(&img_bin_extra_lines[1][2]);
aa0 = vec_left1_bin(b0,c0);
aa1 = vec_left1_bin(b1,c1);
cc0 = vec_right1_bin(c0,a0);
cc1 = vec_right1_bin(c1,a1);
and1_0 = vAND3(aa0, c0, cc0);
and1_1 = vAND3(aa1, c1, cc1);
and1 = vAND3(and1_0, and1_0, and1_1);
and4 = and1;
b0 = _mm_bitshift_left(a0, 127-nb_unused_col);
b1 = _mm_bitshift_left(a1, 127-nb_unused_col);
aa0 = vec_left1_bin(c0,a0);
aa1 = vec_left1_bin(c1,a1);
cc0 = vec_right1_bin_unused_col(a0, b0, nb_unused_col);
cc1 = vec_right1_bin_unused_col(a1, b1, nb_unused_col);
and2_0 = vAND3(aa0, a0, cc0);
and2_1 = vAND3(aa1, a1, cc1);
and2 = vAND3(and2_0, and2_0, and2_1);
and5 = and2;
aa0 = vec_left1_bin(_mm_bitshift_right(and0, 127),and0);
cc0 = vec_right1_bin(and0,and1);
or0 = vOR3(aa0, and0, cc0);
aa0 = vec_left1_bin(and0,and1);
cc0 = vec_right1_bin(and1,and2);
or0_bis = vOR3(aa0, and1, cc0);
aa0 = vec_left1_bin(and1,and2);
cc0 = vec_right1_bin_unused_col(and2, _mm_bitshift_left(and2, 127-nb_unused_col),nb_unused_col);
or0_ter = vOR3(aa0, and2, cc0);
or1 = or0;
or1_bis = or0_bis;
or1_ter = or0_ter;
for(int i = 0; i < height-3; i+=3){
b2 = vec_load(&img_bin_extra_lines[i+2][0]);
c2 = vec_load(&img_bin_extra_lines[i+2][1]);
a2 = _mm_bitshift_right(b2, 127);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
and0_2 = vAND3(aa2, b2, cc2);
and6 = vAND3(and0_2, and0_1, and0_0);
a2 = vec_load(&img_bin_extra_lines[i+2][2]);
aa2 = vec_left1_bin(b2,c2);
cc2 = vec_right1_bin(c2,a2);
and1_2 = vAND3(aa2, c2, cc2);
and7 = vAND3(and1_2, and1_1, and1_0);
b2 = _mm_bitshift_left(a2, 127-nb_unused_col);
aa2 = vec_left1_bin(c2,a2);
cc2 = vec_right1_bin_unused_col(a2,b2,nb_unused_col);
and2_2 = vAND3(aa2, a2, cc2);
and8 = vAND3(and2_2, and2_1, and2_0);
aa0 = vec_left1_bin(_mm_bitshift_right(and6, 127),and6);
cc0 = vec_right1_bin(and6,and7);
or2 = vOR3(aa0, and6, cc0);
aa0 = vec_left1_bin(and6,and7);
cc0 = vec_right1_bin(and7,and8);
or2_bis = vOR3(aa0, and7, cc0);
aa0 = vec_left1_bin(and7,and8);
cc0 = vec_right1_bin_unused_col(and8, _mm_bitshift_left(and8, 127-nb_unused_col),nb_unused_col);
or2_ter = vOR3(aa0, and8, cc0);
y = vOR3(or0, or1, or2);
vec_store(&m[i][0], y);
y = vOR3(or0_bis, or1_bis, or2_bis);
vec_store(&m[i][1], y);
y = vOR3(or0_ter, or1_ter, or2_ter);
vec_store(&m[i][2], y);
b2 = vec_load(&img_bin_extra_lines[i+3][0]);
c2 = vec_load(&img_bin_extra_lines[i+3][1]);
a2 = _mm_bitshift_right(b2, 127);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
and0_0 = vAND3(aa2, b2, cc2);
and0 = vAND3(and0_0, and0_2, and0_1);
a2 = vec_load(&img_bin_extra_lines[i+3][2]);
aa2 = vec_left1_bin(b2,c2);
cc2 = vec_right1_bin(c2,a2);
and1_0 = vAND3(aa2, c2, cc2);
and1 = vAND3(and1_0, and1_2, and1_1);
b2 = _mm_bitshift_left(a2, 127-nb_unused_col);
aa2 = vec_left1_bin(c2,a2);
cc2 = vec_right1_bin_unused_col(a2,b2,nb_unused_col);
and2_0 = vAND3(aa2, a2, cc2);
and2 = vAND3(and2_0, and2_2, and2_1);
aa0 = vec_left1_bin(_mm_bitshift_right(and0, 127),and0);
cc0 = vec_right1_bin(and0,and1);
or0 = vOR3(aa0, and0, cc0);
aa0 = vec_left1_bin(and0,and1);
cc0 = vec_right1_bin(and1,and2);
or0_bis = vOR3(aa0, and1, cc0);
aa0 = vec_left1_bin(and1,and2);
cc0 = vec_right1_bin_unused_col(and2, _mm_bitshift_left(and2, 127-nb_unused_col),nb_unused_col);
or0_ter = vOR3(aa0, and2, cc0);
y = vOR3(or0, or2, or1);
vec_store(&m[i+1][0], y);
y = vOR3(or0_bis, or2_bis, or1_bis);
vec_store(&m[i+1][1], y);
y = vOR3(or0_ter, or2_ter, or1_ter);
vec_store(&m[i+1][2], y);
b2 = vec_load(&img_bin_extra_lines[i+4][0]);
c2 = vec_load(&img_bin_extra_lines[i+4][1]);
a2 = _mm_bitshift_right(b2, 127);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
and0_1 = vAND3(aa2, b2, cc2);
and3 = vAND3(and0_1, and0_0, and0_2);
a2 = vec_load(&img_bin_extra_lines[i+4][2]);
aa2 = vec_left1_bin(b2,c2);
cc2 = vec_right1_bin(c2,a2);
and1_1 = vAND3(aa2, c2, cc2);
and4 = vAND3(and1_1, and1_0, and1_2);
b2 = _mm_bitshift_left(a2, 127-nb_unused_col);
aa2 = vec_left1_bin(c2,a2);
cc2 = vec_right1_bin_unused_col(a2,b2,nb_unused_col);
and2_1 = vAND3(aa2, a2, cc2);
and5 = vAND3(and2_1, and2_0, and2_2);
aa0 = vec_left1_bin(_mm_bitshift_right(and3, 127),and3);
cc0 = vec_right1_bin(and3,and4);
or1 = vOR3(aa0, and3, cc0);
aa0 = vec_left1_bin(and3,and4);
cc0 = vec_right1_bin(and4,and5);
or1_bis = vOR3(aa0, and4, cc0);
aa0 = vec_left1_bin(and4,and5);
cc0 = vec_right1_bin_unused_col(and5, _mm_bitshift_left(and5, 127-nb_unused_col),nb_unused_col);
or1_ter = vOR3(aa0, and5, cc0);
y = vOR3(or1, or0, or2);
vec_store(&m[i+2][0], y);
y = vOR3(or1_bis, or0_bis, or2_bis);
vec_store(&m[i+2][1], y);
y = vOR3(or1_ter, or0_ter, or2_ter);
vec_store(&m[i+2][2], y);
}
switch (n) {
case 2:
and6 = vAND3(and0_1, and0_1, and0_0);
and7 = vAND3(and1_1, and1_1, and1_0);
and8 = vAND3(and2_1, and2_1, and2_0);
aa0 = vec_left1_bin(_mm_bitshift_right(and6, 127),and6);
cc0 = vec_right1_bin(and6,and7);
or2 = vOR3(aa0, and6, cc0);
aa0 = vec_left1_bin(and6,and7);
cc0 = vec_right1_bin(and7,and8);
or2_bis = vOR3(aa0, and7, cc0);
aa0 = vec_left1_bin(and7,and8);
cc0 = vec_right1_bin_unused_col(and8, _mm_bitshift_left(and8, 127-nb_unused_col),nb_unused_col);
or2_ter = vOR3(aa0, and8, cc0);
y = vOR3(or0, or1, or2);
vec_store(&m[height-2][0], y);
y = vOR3(or0_bis, or1_bis, or2_bis);
vec_store(&m[height-2][1], y);
y = vOR3(or0_ter, or1_ter, or2_ter);
vec_store(&m[height-2][2], y);
y = vOR3(or1, or2, or2);
vec_store(&m[height-1][0], y);
y = vOR3(or1_bis, or2_bis, or2_bis);
vec_store(&m[height-1][1], y);
y = vOR3(or1_ter, or2_ter, or2_ter);
vec_store(&m[height-1][2], y);
break;
case 1 :
y = vOR3(or0, or1, or1);
vec_store(&m[height-1][0], y);
y = vOR3(or0_bis, or1_bis, or1_bis);
vec_store(&m[height-1][1], y);
y = vOR3(or0_ter, or1_ter, or1_ter);
vec_store(&m[height-1][2], y);
break;
case 0 :
b2 = vec_load(&img_bin_extra_lines[height-1][0]);
c2 = vec_load(&img_bin_extra_lines[height-1][1]);
a2 = _mm_bitshift_right(b2, 127);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
and0_2 = vAND3(aa2, b2, cc2);
and6 = vAND3(and0_0, and0_2, and0_1);
a2 = vec_load(&img_bin_extra_lines[height-1][2]);
aa2 = vec_left1_bin(b2,c2);
cc2 = vec_right1_bin(c2,a2);
and1_2 = vAND3(aa2, c2, cc2);
and7 = vAND3(and1_0, and1_2, and1_1);
b2 = _mm_bitshift_left(a2, 127-nb_unused_col);
aa2 = vec_left1_bin(c2,a2);
cc2 = vec_right1_bin_unused_col(a2,b2,nb_unused_col);
and2_2 = vAND3(aa2, a2, cc2);
and8 = vAND3(and2_0, and2_2, and2_1);
aa0 = vec_left1_bin(_mm_bitshift_right(and6, 127),and6);
cc0 = vec_right1_bin(and6,and7);
or2 = vOR3(aa0, and6, cc0);
aa0 = vec_left1_bin(and6,and7);
cc0 = vec_right1_bin(and7,and8);
or2_bis = vOR3(aa0, and7, cc0);
aa0 = vec_left1_bin(and7,and8);
cc0 = vec_right1_bin_unused_col(and8, _mm_bitshift_left(and8, 127-nb_unused_col),nb_unused_col);
or2_ter = vOR3(aa0, and8, cc0);
y = vOR3(or0, or1, or2);
vec_store(&m[height-3][0], y);
y = vOR3(or0_bis, or1_bis, or2_bis);
vec_store(&m[height-3][1], y);
y = vOR3(or0_ter, or1_ter, or2_ter);
vec_store(&m[height-3][2], y);
and0 = vAND3(and0_2, and0_2, and0_1);
and1 = vAND3(and1_2, and1_2, and1_1);
and2 = vAND3(and2_2, and2_2, and2_1);
aa0 = vec_left1_bin(_mm_bitshift_right(and0, 127),and0);
cc0 = vec_right1_bin(and0,and1);
or0 = vOR3(aa0, and0, cc0);
aa0 = vec_left1_bin(and0,and1);
cc0 = vec_right1_bin(and1,and2);
or0_bis = vOR3(aa0, and1, cc0);
aa0 = vec_left1_bin(and1,and2);
cc0 = vec_right1_bin_unused_col(and2, _mm_bitshift_left(and2, 127-nb_unused_col),nb_unused_col);
or0_ter = vOR3(aa0, and2, cc0);
y = vOR3(or0, or2, or1);
vec_store(&m[height-2][0], y);
y = vOR3(or0_bis, or2_bis, or1_bis);
vec_store(&m[height-2][1], y);
y = vOR3(or0_ter, or2_ter, or1_ter);
vec_store(&m[height-2][2], y);
y = vOR3(or2, or0, or0);
vec_store(&m[height-1][0], y);
y = vOR3(or2_bis, or0_bis, or0_bis);
vec_store(&m[height-1][1], y);
y = vOR3(or2_ter, or0_ter, or0_ter);
vec_store(&m[height-1][2], y);
break;
}
_mm_free(img_bin_extra_lines);
return m;
}
// Case of images with 4*128 columns or more
// First and second vector columns
b0 = vec_load(&img_bin_extra_lines[0][0]);
b1 = vec_load(&img_bin_extra_lines[1][0]);
c0 = vec_load(&img_bin_extra_lines[0][1]);
c1 = vec_load(&img_bin_extra_lines[1][1]);
a0 = _mm_bitshift_right(b0, 127);
a1 = _mm_bitshift_right(b1, 127);
aa0 = vec_left1_bin(a0,b0);
aa1 = vec_left1_bin(a1,b1);
cc0 = vec_right1_bin(b0,c0);
cc1 = vec_right1_bin(b1,c1);
and0_0 = vAND3(aa0, b0, cc0);
and0_1 = vAND3(aa1, b1, cc1);
and0 = vAND3(and0_0, and0_0, and0_1);
and3 = and0;
a0 = vec_load(&img_bin_extra_lines[0][2]);
a1 = vec_load(&img_bin_extra_lines[1][2]);
aa0 = vec_left1_bin(b0,c0);
aa1 = vec_left1_bin(b1,c1);
cc0 = vec_right1_bin(c0,a0);
cc1 = vec_right1_bin(c1,a1);
and1_0 = vAND3(aa0, c0, cc0);
and1_1 = vAND3(aa1, c1, cc1);
and1 = vAND3(and1_0, and1_0, and1_1);
and4 = and1;
b0 = vec_load(&img_bin_extra_lines[0][3]);
b1 = vec_load(&img_bin_extra_lines[1][3]);
aa0 = vec_left1_bin(c0,a0);
aa1 = vec_left1_bin(c1,a1);
cc0 = vec_right1_bin(a0,b0);
cc1 = vec_right1_bin(a1,b1);
and2_0 = vAND3(aa0, a0, cc0);
and2_1 = vAND3(aa1, a1, cc1);
and2 = vAND3(and2_0, and2_0, and2_1);
and5 = and2;
aa0 = vec_left1_bin(_mm_bitshift_right(and0, 127),and0);
cc0 = vec_right1_bin(and0,and1);
or0 = vOR3(aa0, and0, cc0);
aa0 = vec_left1_bin(and0,and1);
cc0 = vec_right1_bin(and1,and2);
or0_bis = vOR3(aa0, and1, cc0);
or1 = or0;
or1_bis = or0_bis;
for(int i = 0; i < height-3; i+=3){
b2 = vec_load(&img_bin_extra_lines[i+2][0]);
c2 = vec_load(&img_bin_extra_lines[i+2][1]);
a2 = _mm_bitshift_right(b2, 127);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
and0_2 = vAND3(aa2, b2, cc2);
and6 = vAND3(and0_2, and0_1, and0_0);
a2 = vec_load(&img_bin_extra_lines[i+2][2]);
aa2 = vec_left1_bin(b2,c2);
cc2 = vec_right1_bin(c2,a2);
and1_2 = vAND3(aa2, c2, cc2);
and7 = vAND3(and1_2, and1_1, and1_0);
b2 = vec_load(&img_bin_extra_lines[i+2][3]);
aa2 = vec_left1_bin(c2,a2);
cc2 = vec_right1_bin(a2,b2);
and2_2 = vAND3(aa2, a2, cc2);
and8 = vAND3(and2_2, and2_1, and2_0);
aa0 = vec_left1_bin(_mm_bitshift_right(and6, 127),and6);
cc0 = vec_right1_bin(and6,and7);
or2 = vOR3(aa0, and6, cc0);
aa0 = vec_left1_bin(and6,and7);
cc0 = vec_right1_bin(and7,and8);
or2_bis = vOR3(aa0, and7, cc0);
y = vOR3(or0, or1, or2);
vec_store(&m[i][0], y);
y = vOR3(or0_bis, or1_bis, or2_bis);
vec_store(&m[i][1], y);
b2 = vec_load(&img_bin_extra_lines[i+3][0]);
c2 = vec_load(&img_bin_extra_lines[i+3][1]);
a2 = _mm_bitshift_right(b2, 127);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
and0_0 = vAND3(aa2, b2, cc2);
and0 = vAND3(and0_0, and0_2, and0_1);
a2 = vec_load(&img_bin_extra_lines[i+3][2]);
aa2 = vec_left1_bin(b2,c2);
cc2 = vec_right1_bin(c2,a2);
and1_0 = vAND3(aa2, c2, cc2);
and1 = vAND3(and1_0, and1_2, and1_1);
b2 = vec_load(&img_bin_extra_lines[i+3][3]);
aa2 = vec_left1_bin(c2,a2);
cc2 = vec_right1_bin(a2,b2);
and2_0 = vAND3(aa2, a2, cc2);
and2 = vAND3(and2_0, and2_2, and2_1);
aa0 = vec_left1_bin(_mm_bitshift_right(and0, 127),and0);
cc0 = vec_right1_bin(and0,and1);
or0 = vOR3(aa0, and0, cc0);
aa0 = vec_left1_bin(and0,and1);
cc0 = vec_right1_bin(and1,and2);
or0_bis = vOR3(aa0, and1, cc0);
y = vOR3(or0, or1, or2);
vec_store(&m[i+1][0], y);
y = vOR3(or0_bis, or1_bis, or2_bis);
vec_store(&m[i+1][1], y);
b2 = vec_load(&img_bin_extra_lines[i+4][0]);
c2 = vec_load(&img_bin_extra_lines[i+4][1]);
a2 = _mm_bitshift_right(b2, 127);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
and0_1 = vAND3(aa2, b2, cc2);
and3 = vAND3(and0_1, and0_0, and0_2);
a2 = vec_load(&img_bin_extra_lines[i+4][2]);
aa2 = vec_left1_bin(b2,c2);
cc2 = vec_right1_bin(c2,a2);
and1_1 = vAND3(aa2, c2, cc2);
and4 = vAND3(and1_1, and1_0, and1_2);
b2 = vec_load(&img_bin_extra_lines[i+4][3]);
aa2 = vec_left1_bin(c2,a2);
cc2 = vec_right1_bin(a2,b2);
and2_1 = vAND3(aa2, a2, cc2);
and5 = vAND3(and2_1, and2_0, and2_2);
aa0 = vec_left1_bin(_mm_bitshift_right(and3, 127),and3);
cc0 = vec_right1_bin(and3,and4);
or1 = vOR3(aa0, and3, cc0);
aa0 = vec_left1_bin(and3,and4);
cc0 = vec_right1_bin(and4,and5);
or1_bis = vOR3(aa0, and4, cc0);
y = vOR3(or0, or1, or2);
vec_store(&m[i+2][0], y);
y = vOR3(or0_bis, or1_bis, or2_bis);
vec_store(&m[i+2][1], y);
}
switch(n) {
case 2 :
and6 = vAND3(and0_1, and0_1, and0_0);
and7 = vAND3(and1_1, and1_1, and1_0);
and8 = vAND3(and2_1, and2_1, and2_0);
aa0 = vec_left1_bin(_mm_bitshift_right(and6, 127),and6);
cc0 = vec_right1_bin(and6,and7);
or2 = vOR3(aa0, and6, cc0);
aa0 = vec_left1_bin(and6,and7);
cc0 = vec_right1_bin(and7,and8);
or2_bis = vOR3(aa0, and7, cc0);
y = vOR3(or0, or1, or2);
vec_store(&m[height-2][0], y);
y = vOR3(or0_bis, or1_bis, or2_bis);
vec_store(&m[height-2][1], y);
y = vOR3(or1, or2, or2);
vec_store(&m[height-1][0], y);
y = vOR3(or1_bis, or2_bis, or2_bis);
vec_store(&m[height-1][1], y);
break;
case 1 :
y = vOR3(or0, or1, or1);
vec_store(&m[height-1][0], y);
y = vOR3(or0_bis, or1_bis, or1_bis);
vec_store(&m[height-1][1], y);
break;
case 0:
b2 = vec_load(&img_bin_extra_lines[height-1][0]);
c2 = vec_load(&img_bin_extra_lines[height-1][1]);
a2 = _mm_bitshift_right(b2, 127);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
and0_2 = vAND3(aa2, b2, cc2);
and6 = vAND3(and0_2, and0_1, and0_0);
a2 = vec_load(&img_bin_extra_lines[height-1][2]);
aa2 = vec_left1_bin(b2,c2);
cc2 = vec_right1_bin(c2,a2);
and1_2 = vAND3(aa2, c2, cc2);
and7 = vAND3(and1_2, and1_1, and1_0);
b2 = vec_load(&img_bin_extra_lines[height-1][3]);
aa2 = vec_left1_bin(c2,a2);
cc2 = vec_right1_bin(a2,b2);
and2_2 = vAND3(aa2, a2, cc2);
and8 = vAND3(and2_2, and2_1, and2_0);
aa0 = vec_left1_bin(_mm_bitshift_right(and6, 127),and6);
cc0 = vec_right1_bin(and6,and7);
or2 = vOR3(aa0, and6, cc0);
aa0 = vec_left1_bin(and6,and7);
cc0 = vec_right1_bin(and7,and8);
or2_bis = vOR3(aa0, and7, cc0);
y = vOR3(or0, or1, or2);
vec_store(&m[height-3][0], y);
y = vOR3(or0_bis, or1_bis, or2_bis);
vec_store(&m[height-3][1], y);
and0 = vAND3(and0_2, and0_2, and0_1);
and1 = vAND3(and1_2, and1_2, and1_1);
and2 = vAND3(and2_2, and2_2, and2_1);
aa0 = vec_left1_bin(_mm_bitshift_right(and0, 127),and0);
cc0 = vec_right1_bin(and0,and1);
or0 = vOR3(aa0, and0, cc0);
aa0 = vec_left1_bin(and0,and1);
cc0 = vec_right1_bin(and1,and2);
or0_bis = vOR3(aa0, and1, cc0);
y = vOR3(or0, or1, or2);
vec_store(&m[height-2][0], y);
y = vOR3(or0_bis, or1_bis, or2_bis);
vec_store(&m[height-2][1], y);
y = vOR3(or2, or0, or0);
vec_store(&m[height-1][0], y);
y = vOR3(or2_bis, or0_bis, or0_bis);
vec_store(&m[height-1][1], y);
break;
default:
break;
}
for(int j = 2; j < nb_vbits_col-2; j++){
a0 = vec_load(&img_bin_extra_lines[0][j-2]);
a1 = vec_load(&img_bin_extra_lines[1][j-2]);
b0 = vec_load(&img_bin_extra_lines[0][j-1]);
b1 = vec_load(&img_bin_extra_lines[1][j-1]);
c0 = vec_load(&img_bin_extra_lines[0][j-0]);
c1 = vec_load(&img_bin_extra_lines[1][j-0]);
aa0 = vec_left1_bin(a0,b0);
aa1 = vec_left1_bin(a1,b1);
cc0 = vec_right1_bin(b0,c0);
cc1 = vec_right1_bin(b1,c1);
and0_0 = vAND3(aa0, b0, cc0);
and0_1 = vAND3(aa1, b1, cc1);
and0 = vAND3(and0_0, and0_0, and0_1);
and3 = and0;
a0 = vec_load(&img_bin_extra_lines[0][j+1]);
a1 = vec_load(&img_bin_extra_lines[1][j+1]);
aa0 = vec_left1_bin(b0,c0);
aa1 = vec_left1_bin(b1,c1);
cc0 = vec_right1_bin(c0,a0);
cc1 = vec_right1_bin(c1,a1);
and1_0 = vAND3(aa0, c0, cc0);
and1_1 = vAND3(aa1, c1, cc1);
and1 = vAND3(and1_0, and1_0, and1_1);
and4 = and1;
b0 = vec_load(&img_bin_extra_lines[0][j+2]);
b1 = vec_load(&img_bin_extra_lines[1][j+2]);
aa0 = vec_left1_bin(c0,a0);
aa1 = vec_left1_bin(c1,a1);
cc0 = vec_right1_bin(a0,b0);
cc1 = vec_right1_bin(a1,b1);
and2_0 = vAND3(aa0, a0, cc0);
and2_1 = vAND3(aa1, a1, cc1);
and2 = vAND3(and2_0, and2_0, and2_1);
and5 = and2;
aa0 = vec_left1_bin(and0, and1);
cc0 = vec_right1_bin(and1, and2);
or0 = vOR3(aa0, and1, cc0);
or1 = or0;
for(int i = 0; i < height-3; i+=3){
a2 = vec_load(&img_bin_extra_lines[i+2][j-2]);
b2 = vec_load(&img_bin_extra_lines[i+2][j-1]);
c2 = vec_load(&img_bin_extra_lines[i+2][j-0]);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
and0_2 = vAND3(aa2, b2, cc2);
and6 = vAND3(and0_2, and0_1, and0_0);
a2 = vec_load(&img_bin_extra_lines[i+2][j+1]);
aa2 = vec_left1_bin(b2,c2);
cc2 = vec_right1_bin(c2,a2);
and1_2 = vAND3(aa2, c2, cc2);
and7 = vAND3(and1_2, and1_1, and1_0);
b2 = vec_load(&img_bin_extra_lines[i+2][j+2]);
aa2 = vec_left1_bin(c2,a2);
cc2 = vec_right1_bin(a2,b2);
and2_2 = vAND3(aa2, a2, cc2);
and8 = vAND3(and2_2, and2_1, and2_0);
aa2 = vec_left1_bin(and6, and7);
cc2 = vec_right1_bin(and7, and8);
or2 = vOR3(aa2, and7, cc2);
y = vOR3(or0, or1, or2);
vec_store(&m[i][j], y);
a2 = vec_load(&img_bin_extra_lines[i+3][j-2]);
b2 = vec_load(&img_bin_extra_lines[i+3][j-1]);
c2 = vec_load(&img_bin_extra_lines[i+3][j-0]);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
and0_0 = vAND3(aa2, b2, cc2);
and0 = vAND3(and0_0, and0_2, and0_1);
a2 = vec_load(&img_bin_extra_lines[i+3][j+1]);
aa2 = vec_left1_bin(b2,c2);
cc2 = vec_right1_bin(c2,a2);
and1_0 = vAND3(aa2, c2, cc2);
and1 = vAND3(and1_0, and1_2, and1_1);
b2 = vec_load(&img_bin_extra_lines[i+3][j+2]);
aa2 = vec_left1_bin(c2,a2);
cc2 = vec_right1_bin(a2,b2);
and2_0 = vAND3(aa2, a2, cc2);
and2 = vAND3(and2_0, and2_2, and2_1);
aa2 = vec_left1_bin(and0, and1);
cc2 = vec_right1_bin(and1, and2);
or0 = vOR3(aa2, and1, cc2);
y = vOR3(or0, or1, or2);
vec_store(&m[i+1][j], y);
a2 = vec_load(&img_bin_extra_lines[i+4][j-2]);
b2 = vec_load(&img_bin_extra_lines[i+4][j-1]);
c2 = vec_load(&img_bin_extra_lines[i+4][j-0]);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
and0_1 = vAND3(aa2, b2, cc2);
and3 = vAND3(and0_1, and0_0, and0_2);
a2 = vec_load(&img_bin_extra_lines[i+4][j+1]);
aa2 = vec_left1_bin(b2,c2);
cc2 = vec_right1_bin(c2,a2);
and1_1 = vAND3(aa2, c2, cc2);
and4 = vAND3(and1_1, and1_0, and1_2);
b2 = vec_load(&img_bin_extra_lines[i+4][j+2]);
aa2 = vec_left1_bin(c2,a2);
cc2 = vec_right1_bin(a2,b2);
and2_1 = vAND3(aa2, a2, cc2);
and5 = vAND3(and2_1, and2_0, and2_2);
aa2 = vec_left1_bin(and3, and4);
cc2 = vec_right1_bin(and4, and5);
or1 = vOR3(aa2, and4, cc2);
y = vOR3(or0, or1, or2);
vec_store(&m[i+2][j], y);
}
switch(n){
case 2:
and6 = vAND3(and0_1, and0_1, and0_0);
and7 = vAND3(and1_1, and1_1, and1_0);
and8 = vAND3(and2_1, and2_1, and2_0);
aa2 = vec_left1_bin(and6, and7);
cc2 = vec_right1_bin(and7, and8);
or2 = vOR3(aa2, and7, cc2);
y = vOR3(or0, or1, or2);
vec_store(&m[height-2][j], y);
y = vOR3(or1, or2, or2);
vec_store(&m[height-1][j], y);
break;
case 1:
y = vOR3(or0, or1, or1);
vec_store(&m[height-1][j], y);
break;
case 0:
a2 = vec_load(&img_bin_extra_lines[height-1][j-2]);
b2 = vec_load(&img_bin_extra_lines[height-1][j-1]);
c2 = vec_load(&img_bin_extra_lines[height-1][j-0]);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
and0_2 = vAND3(aa2, b2, cc2);
and6 = vAND3(and0_2, and0_1, and0_0);
a2 = vec_load(&img_bin_extra_lines[height-1][j+1]);
aa2 = vec_left1_bin(b2,c2);
cc2 = vec_right1_bin(c2,a2);
and1_2 = vAND3(aa2, c2, cc2);
and7 = vAND3(and1_2, and1_1, and1_0);
b2 = vec_load(&img_bin_extra_lines[height-1][j+2]);
aa2 = vec_left1_bin(c2,a2);
cc2 = vec_right1_bin(a2,b2);
and2_2 = vAND3(aa2, a2, cc2);
and8 = vAND3(and2_2, and2_1, and2_0);
aa2 = vec_left1_bin(and6, and7);
cc2 = vec_right1_bin(and7, and8);
or2 = vOR3(aa2, and7, cc2);
y = vOR3(or0, or1, or2);
vec_store(&m[height-3][j], y);
and0 = vAND3(and0_2, and0_2, and0_1);
and1 = vAND3(and1_2, and1_2, and1_1);
and2 = vAND3(and2_2, and2_2, and2_1);
aa2 = vec_left1_bin(and0, and1);
cc2 = vec_right1_bin(and1, and2);
or0 = vOR3(aa2, and1, cc2);
y = vOR3(or0, or1, or2);
vec_store(&m[height-2][j], y);
y = vOR3(or2, or0, or0);
vec_store(&m[height-1][j], y);
break;
default:
break;
}
}
// Second-to-last and last vector columns
// NOTE(review): in the row loop below, the loads for vector columns nb_vbits_col-3 and
// nb_vbits_col-4 inside the i+3 and i+4 steps read from img_bin_extra_lines[i+2]
// instead of rows i+3 / i+4 (compare with the nb_vbits_col-1/-2 loads just above them).
// This looks like a copy-paste slip — confirm against the other column sections, where
// every load of a given step uses the same row index.
b0 = vec_load(&img_bin_extra_lines[0][nb_vbits_col-1]);
b1 = vec_load(&img_bin_extra_lines[1][nb_vbits_col-1]);
a0 = vec_load(&img_bin_extra_lines[0][nb_vbits_col-2]);
a1 = vec_load(&img_bin_extra_lines[1][nb_vbits_col-2]);
c0 = _mm_bitshift_left(b0, 127-nb_unused_col);
c1 = _mm_bitshift_left(b1, 127-nb_unused_col);
aa0 = vec_left1_bin(a0,b0);
aa1 = vec_left1_bin(a1,b1);
cc0 = vec_right1_bin_unused_col(b0,c0,nb_unused_col);
cc1 = vec_right1_bin_unused_col(b1,c1,nb_unused_col);
and0_0 = vAND3(aa0, b0, cc0);
and0_1 = vAND3(aa1, b1, cc1);
and0 = vAND3(and0_0, and0_0, and0_1);
and3 = and0;
c0 = vec_load(&img_bin_extra_lines[0][nb_vbits_col-3]);
c1 = vec_load(&img_bin_extra_lines[1][nb_vbits_col-3]);
aa0 = vec_left1_bin(c0,a0);
aa1 = vec_left1_bin(c1,a1);
cc0 = vec_right1_bin(a0,b0);
cc1 = vec_right1_bin(a1,b1);
and1_0 = vAND3(aa0, a0, cc0);
and1_1 = vAND3(aa1, a1, cc1);
and1 = vAND3(and1_0, and1_0, and1_1);
and4 = and1;
b0 = vec_load(&img_bin_extra_lines[0][nb_vbits_col-4]);
b1 = vec_load(&img_bin_extra_lines[1][nb_vbits_col-4]);
aa0 = vec_left1_bin(b0,c0);
aa1 = vec_left1_bin(b1,c1);
cc0 = vec_right1_bin(c0,a0);
cc1 = vec_right1_bin(c1,a1);
and2_0 = vAND3(aa0, c0, cc0);
and2_1 = vAND3(aa1, c1, cc1);
and2 = vAND3(and2_0, and2_0, and2_1);
and5 = and2;
aa0 = vec_left1_bin(and1,and0);
cc0 = vec_right1_bin_unused_col(and0, _mm_bitshift_left(and0, 127-nb_unused_col),nb_unused_col);
or0 = vOR3(aa0, and0, cc0);
aa0 = vec_left1_bin(and2,and1);
cc0 = vec_right1_bin(and1,and0);
or0_bis = vOR3(aa0, and1, cc0);
or1 = or0;
or1_bis = or0_bis;
for(int i = 0; i < height-3; i+=3){
a2 = vec_load(&img_bin_extra_lines[i+2][nb_vbits_col-2]);
b2 = vec_load(&img_bin_extra_lines[i+2][nb_vbits_col-1]);
c2 = _mm_bitshift_left(b2, 127-nb_unused_col);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin_unused_col(b2,c2,nb_unused_col);
and0_2 = vAND3(aa2, b2, cc2);
and6 = vAND3(and0_2, and0_1, and0_0);
c2 = vec_load(&img_bin_extra_lines[i+2][nb_vbits_col-3]);
aa2 = vec_left1_bin(c2,a2);
cc2 = vec_right1_bin(a2,b2);
and1_2 = vAND3(aa2, a2, cc2);
and7 = vAND3(and1_2, and1_1, and1_0);
b2 = vec_load(&img_bin_extra_lines[i+2][nb_vbits_col-4]);
aa2 = vec_left1_bin(b2,c2);
cc2 = vec_right1_bin(c2,a2);
and2_2 = vAND3(aa2, c2, cc2);
and8 = vAND3(and2_2, and2_1, and2_0);
aa0 = vec_left1_bin(and7,and6);
cc0 = vec_right1_bin_unused_col(and6, _mm_bitshift_left(and6, 127-nb_unused_col),nb_unused_col);
or2 = vOR3(aa0, and6, cc0);
aa0 = vec_left1_bin(and8,and7);
cc0 = vec_right1_bin(and7,and6);
or2_bis = vOR3(aa0, and7, cc0);
y = vOR3(or0, or1, or2);
vec_store(&m[i][nb_vbits_col-1], y);
y = vOR3(or0_bis, or1_bis, or2_bis);
vec_store(&m[i][nb_vbits_col-2], y);
a2 = vec_load(&img_bin_extra_lines[i+3][nb_vbits_col-2]);
b2 = vec_load(&img_bin_extra_lines[i+3][nb_vbits_col-1]);
c2 = _mm_bitshift_left(b2, 127-nb_unused_col);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin_unused_col(b2,c2,nb_unused_col);
and0_0 = vAND3(aa2, b2, cc2);
and0 = vAND3(and0_0, and0_2, and0_1);
c2 = vec_load(&img_bin_extra_lines[i+2][nb_vbits_col-3]);
aa2 = vec_left1_bin(c2,a2);
cc2 = vec_right1_bin(a2,b2);
and1_0 = vAND3(aa2, a2, cc2);
and1 = vAND3(and1_0, and1_2, and1_1);
b2 = vec_load(&img_bin_extra_lines[i+2][nb_vbits_col-4]);
aa2 = vec_left1_bin(b2,c2);
cc2 = vec_right1_bin(c2,a2);
and2_0 = vAND3(aa2, c2, cc2);
and2 = vAND3(and2_0, and2_2, and2_1);
aa0 = vec_left1_bin(and1,and0);
cc0 = vec_right1_bin_unused_col(and0, _mm_bitshift_left(and0, 127-nb_unused_col),nb_unused_col);
or0 = vOR3(aa0, and0, cc0);
aa0 = vec_left1_bin(and2,and1);
cc0 = vec_right1_bin(and1,and0);
or0_bis = vOR3(aa0, and1, cc0);
y = vOR3(or0, or1, or2);
vec_store(&m[i+1][nb_vbits_col-1], y);
y = vOR3(or0_bis, or1_bis, or2_bis);
vec_store(&m[i+1][nb_vbits_col-2], y);
a2 = vec_load(&img_bin_extra_lines[i+4][nb_vbits_col-2]);
b2 = vec_load(&img_bin_extra_lines[i+4][nb_vbits_col-1]);
c2 = _mm_bitshift_left(b2, 127-nb_unused_col);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin_unused_col(b2,c2,nb_unused_col);
and0_1 = vAND3(aa2, b2, cc2);
and3 = vAND3(and0_1, and0_0, and0_2);
c2 = vec_load(&img_bin_extra_lines[i+2][nb_vbits_col-3]);
aa2 = vec_left1_bin(c2,a2);
cc2 = vec_right1_bin(a2,b2);
and1_1 = vAND3(aa2, a2, cc2);
and4 = vAND3(and1_1, and1_0, and1_2);
b2 = vec_load(&img_bin_extra_lines[i+2][nb_vbits_col-4]);
aa2 = vec_left1_bin(b2,c2);
cc2 = vec_right1_bin(c2,a2);
and2_1 = vAND3(aa2, c2, cc2);
and5 = vAND3(and2_1, and2_0, and2_2);
aa0 = vec_left1_bin(and4,and3);
cc0 = vec_right1_bin_unused_col(and3, _mm_bitshift_left(and3, 127-nb_unused_col),nb_unused_col);
or1 = vOR3(aa0, and3, cc0);
aa0 = vec_left1_bin(and5,and4);
cc0 = vec_right1_bin(and4,and3);
or1_bis = vOR3(aa0, and4, cc0);
y = vOR3(or0, or1, or2);
vec_store(&m[i+2][nb_vbits_col-1], y);
y = vOR3(or0_bis, or1_bis, or2_bis);
vec_store(&m[i+2][nb_vbits_col-2], y);
}
switch(n){
case 2:
and6 = vAND3(and0_1, and0_1, and0_0);
and7 = vAND3(and1_1, and1_1, and1_0);
and8 = vAND3(and2_1, and2_1, and2_0);
aa0 = vec_left1_bin(and7,and6);
cc0 = vec_right1_bin_unused_col(and6, _mm_bitshift_left(and6, 127-nb_unused_col),nb_unused_col);
or2 = vOR3(aa0, and6, cc0);
aa0 = vec_left1_bin(and8,and7);
cc0 = vec_right1_bin(and7,and6);
or2_bis = vOR3(aa0, and7, cc0);
y = vOR3(or0, or1, or2);
vec_store(&m[height-2][nb_vbits_col-1], y);
y = vOR3(or0_bis, or1_bis, or2_bis);
vec_store(&m[height-2][nb_vbits_col-2], y);
y = vOR3(or1, or2, or2);
vec_store(&m[height-1][nb_vbits_col-1], y);
y = vOR3(or1_bis, or2_bis, or2_bis);
vec_store(&m[height-1][nb_vbits_col-2], y);
break;
case 1:
y = vOR3(or0, or1, or1);
vec_store(&m[height-1][nb_vbits_col-1], y);
y = vOR3(or0_bis, or1_bis, or1_bis);
vec_store(&m[height-1][nb_vbits_col-2], y);
break;
case 0:
a2 = vec_load(&img_bin_extra_lines[height-1][nb_vbits_col-2]);
b2 = vec_load(&img_bin_extra_lines[height-1][nb_vbits_col-1]);
c2 = _mm_bitshift_left(b2, 127-nb_unused_col);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin_unused_col(b2,c2,nb_unused_col);
and0_2 = vAND3(aa2, b2, cc2);
and6 = vAND3(and0_2, and0_1, and0_0);
c2 = vec_load(&img_bin_extra_lines[height-1][nb_vbits_col-3]);
aa2 = vec_left1_bin(c2,a2);
cc2 = vec_right1_bin(a2,b2);
and1_2 = vAND3(aa2, a2, cc2);
and7 = vAND3(and1_2, and1_1, and1_0);
b2 = vec_load(&img_bin_extra_lines[height-1][nb_vbits_col-4]);
aa2 = vec_left1_bin(b2,c2);
cc2 = vec_right1_bin(c2,a2);
and2_2 = vAND3(aa2, c2, cc2);
and8 = vAND3(and2_2, and2_1, and2_0);
aa0 = vec_left1_bin(and7,and6);
cc0 = vec_right1_bin_unused_col(and6, _mm_bitshift_left(and6, 127-nb_unused_col),nb_unused_col);
or2 = vOR3(aa0, and6, cc0);
aa0 = vec_left1_bin(and8,and7);
cc0 = vec_right1_bin(and7,and6);
or2_bis = vOR3(aa0, and7, cc0);
y = vOR3(or0, or1, or2);
vec_store(&m[height-3][nb_vbits_col-1], y);
y = vOR3(or0_bis, or1_bis, or2_bis);
vec_store(&m[height-3][nb_vbits_col-2], y);
and0 = vAND3(and0_2, and0_2, and0_1);
and1 = vAND3(and1_2, and1_2, and1_1);
and2 = vAND3(and2_2, and2_2, and2_1);
aa0 = vec_left1_bin(and1,and0);
cc0 = vec_right1_bin_unused_col(and0, _mm_bitshift_left(and0, 127-nb_unused_col),nb_unused_col);
or0 = vOR3(aa0, and0, cc0);
aa0 = vec_left1_bin(and2,and1);
cc0 = vec_right1_bin(and1,and0);
or0_bis = vOR3(aa0, and1, cc0);
y = vOR3(or0, or1, or2);
vec_store(&m[height-2][nb_vbits_col-1], y);
y = vOR3(or0_bis, or1_bis, or2_bis);
vec_store(&m[height-2][nb_vbits_col-2], y);
y = vOR3(or2, or0, or0);
vec_store(&m[height-1][nb_vbits_col-1], y);
y = vOR3(or2_bis, or0_bis, or0_bis);
vec_store(&m[height-1][nb_vbits_col-2], y);
break;
default:
break;
}
_mm_free(img_bin_extra_lines);
return m;
}
vbits ** fermeture_fusion_SIMD(vbits** img_bin, int height, int width)
{
int nb_vbits_col = ceil((float)width/128);
int nb_unused_col = (128-(width%128))%128;
int n = height%3;
vbits **m = (vbits**)vui32matrix(0, height-1, 0, nb_vbits_col-1);
// Append an extra pointer to row height-1 of the image so the bottom border is easier to handle
vbits **img_bin_extra_lines=(vbits **) _mm_malloc ((size_t)((height+1)*sizeof(vbits*)), 16);
if (!img_bin_extra_lines) vnrerror("allocation failure in ouverture_fusion_SIMD()");
for(int i = 0; i < height; i++)
img_bin_extra_lines[i] = img_bin[i];
img_bin_extra_lines[height] = img_bin_extra_lines[height-1];
vbits a0, a1, a2;
vbits b0, b1, b2;
vbits c0, c1, c2;
vbits aa0, aa1, aa2;
vbits cc0, cc1, cc2;
vbits and0, and1, and2, and0_bis, and1_bis, and2_bis, and0_ter, and1_ter, and2_ter;
vbits or0, or1, or2, or3, or4, or5, or6, or7, or8;
vbits or0_0, or0_1, or0_2, or1_0, or1_1, or1_2, or2_0, or2_1, or2_2;
vbits y;
// Case of images that fit in a single 128-bit vector column (nb_vbits_col == 1)
if(nb_vbits_col == 1){
b0 = vec_load(&img_bin_extra_lines[0][0]);
b1 = vec_load(&img_bin_extra_lines[1][0]);
a0 = _mm_bitshift_right(b0, 127);
a1 = _mm_bitshift_right(b1, 127);
c0 = _mm_bitshift_left(b0, 127-nb_unused_col);
c1 = _mm_bitshift_left(b1, 127-nb_unused_col);
aa0 = vec_left1_bin(a0,b0);
aa1 = vec_left1_bin(a1,b1);
cc0 = vec_right1_bin(b0,c0);
cc1 = vec_right1_bin(b1,c1);
or0_0 = vOR3(aa0, b0, cc0);
or0_1 = vOR3(aa1, b1, cc1);
or0 = vOR3(or0_0, or0_0, or0_1);
or3 = or0;
aa0 = vec_left1_bin(_mm_bitshift_right(or0, 127),or0);
cc0 = vec_right1_bin_unused_col(or0, _mm_bitshift_left(or0, 127-nb_unused_col),nb_unused_col);
and0 = vAND3(aa0, or0, cc0);
and1 = and0;
for(int i = 0; i < height-3; i+=3){
b2 = vec_load(&img_bin_extra_lines[i+2][0]);
a2 = _mm_bitshift_right(b2, 127);
c2 = _mm_bitshift_left(b2, 127-nb_unused_col);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin_unused_col(b2,c2,nb_unused_col);
or0_2 = vOR3(aa2, b2, cc2);
or6 = vOR3(or0_2, or0_1, or0_0);
aa0 = vec_left1_bin(_mm_bitshift_right(or6, 127),or6);
cc0 = vec_right1_bin_unused_col(or6, _mm_bitshift_left(or6, 127-nb_unused_col),nb_unused_col);
and2 = vAND3(aa0, or6, cc0);
y = vAND3(and0, and1, and2);
vec_store(&m[i][0], y);
b2 = vec_load(&img_bin_extra_lines[i+3][0]);
a2 = _mm_bitshift_right(b2, 127);
c2 = _mm_bitshift_left(b2, 127-nb_unused_col);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin_unused_col(b2,c2,nb_unused_col);
or0_0 = vOR3(aa2, b2, cc2);
or0 = vOR3(or0_0, or0_2, or0_1);
aa0 = vec_left1_bin(_mm_bitshift_right(or0, 127),or0);
cc0 = vec_right1_bin_unused_col(or0, _mm_bitshift_left(or0, 127-nb_unused_col),nb_unused_col);
and0 = vAND3(aa0, or0, cc0);
y = vAND3(and0, and2, and1);
vec_store(&m[i+1][0], y);
b2 = vec_load(&img_bin_extra_lines[i+4][0]);
a2 = _mm_bitshift_right(b2, 127);
c2 = _mm_bitshift_left(b2, 127-nb_unused_col);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin_unused_col(b2,c2,nb_unused_col);
or0_1 = vOR3(aa2, b2, cc2);
or3 = vOR3(or0_1, or0_0, or0_2);
aa0 = vec_left1_bin(_mm_bitshift_right(or3, 127),or3);
cc0 = vec_right1_bin_unused_col(or3, _mm_bitshift_left(or3, 127-nb_unused_col),nb_unused_col);
and1 = vAND3(aa0, or3, cc0);
y = vAND3(and1, and0, and2);
vec_store(&m[i+2][0], y);
}
switch(n){
case 2 :
or6 = vOR3(or0_1, or0_1, or0_0);
aa0 = vec_left1_bin(_mm_bitshift_right(or6, 127),or6);
cc0 = vec_right1_bin_unused_col(or6, _mm_bitshift_left(or6, 127-nb_unused_col),nb_unused_col);
and2 = vAND3(aa0, or6, cc0);
y = vAND3(and0, and1, and2);
vec_store(&m[height-2][0], y);
y = vAND3(and1, and2, and2);
vec_store(&m[height-1][0], y);
break;
case 1 :
y = vAND3(and0, and1, and1);
vec_store(&m[height-1][0], y);
break;
case 0 :
b2 = vec_load(&img_bin_extra_lines[height-1][0]);
a2 = _mm_bitshift_right(b2, 127);
c2 = _mm_bitshift_left(b2, 127-nb_unused_col);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin_unused_col(b2,c2,nb_unused_col);
or0_2 = vOR3(aa2, b2, cc2);
or6 = vOR3(or0_2, or0_1, or0_0);
aa0 = vec_left1_bin(_mm_bitshift_right(or6, 127),or6);
cc0 = vec_right1_bin_unused_col(or6, _mm_bitshift_left(or6, 127-nb_unused_col),nb_unused_col);
and2 = vAND3(aa0, or6, cc0);
y = vAND3(and0, and1, and2);
vec_store(&m[height-3][0], y);
or0 = vOR3(or0_2, or0_2, or0_1);
aa0 = vec_left1_bin(_mm_bitshift_right(or0, 127),or0);
cc0 = vec_right1_bin_unused_col(or0, _mm_bitshift_left(or0, 127-nb_unused_col),nb_unused_col);
and0 = vAND3(aa0, or0, cc0);
y = vAND3(and0, and2, and1);
vec_store(&m[height-2][0], y);
y = vAND3(and2, and0, and0);
vec_store(&m[height-1][0], y);
break;
}
_mm_free(img_bin_extra_lines);
return m;
}
// CAS DES IMAGES ENTRE 1*128+1 ET 2*128 COLONNES
if(nb_vbits_col == 2){
b0 = vec_load(&img_bin_extra_lines[0][0]);
b1 = vec_load(&img_bin_extra_lines[1][0]);
c0 = vec_load(&img_bin_extra_lines[0][1]);
c1 = vec_load(&img_bin_extra_lines[1][1]);
a0 = _mm_bitshift_right(b0, 127);
a1 = _mm_bitshift_right(b1, 127);
aa0 = vec_left1_bin(a0,b0);
aa1 = vec_left1_bin(a1,b1);
cc0 = vec_right1_bin(b0,c0);
cc1 = vec_right1_bin(b1,c1);
or0_0 = vOR3(aa0, b0, cc0);
or0_1 = vOR3(aa1, b1, cc1);
or0 = vOR3(or0_0, or0_0, or0_1);
or3 = or0;
a0 = _mm_bitshift_left(c0, 127-nb_unused_col);
a1 = _mm_bitshift_left(c1, 127-nb_unused_col);
aa0 = vec_left1_bin(b0,c0);
aa1 = vec_left1_bin(b1,c1);
cc0 = vec_right1_bin(c0,a0);
cc1 = vec_right1_bin(c1,a1);
or1_0 = vOR3(aa0, c0, cc0);
or1_1 = vOR3(aa1, c1, cc1);
or1 = vOR3(or1_0, or1_0, or1_1);
or4 = or1;
aa0 = vec_left1_bin(_mm_bitshift_right(or0, 127),or0);
cc0 = vec_right1_bin(or0,or1);
and0 = vAND3(aa0, or0, cc0);
aa0 = vec_left1_bin(or0,or1);
cc0 = vec_right1_bin_unused_col(or1, _mm_bitshift_left(or1, 127-nb_unused_col),nb_unused_col);
and0_bis = vAND3(aa0, or1, cc0);
and1 = and0;
and1_bis = and0_bis;
for(int i = 0; i < height-3; i+=3){
b2 = vec_load(&img_bin_extra_lines[i+2][0]);
c2 = vec_load(&img_bin_extra_lines[i+2][1]);
a2 = _mm_bitshift_right(b2, 127);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
or0_2 = vOR3(aa2, b2, cc2);
or6 = vOR3(or0_2, or0_1, or0_0);
a2 = _mm_bitshift_left(c2, 127-nb_unused_col);
aa2 = vec_left1_bin(b2,c2);
cc2 = vec_right1_bin_unused_col(c2,a2,nb_unused_col);
or1_2 = vOR3(aa2, c2, cc2);
or7 = vOR3(or1_2, or1_1, or1_0);
aa0 = vec_left1_bin(_mm_bitshift_right(or6, 127),or6);
cc0 = vec_right1_bin(or6,or7);
and2 = vAND3(aa0, or6, cc0);
aa0 = vec_left1_bin(or6,or7);
cc0 = vec_right1_bin_unused_col(or7, _mm_bitshift_left(or7, 127-nb_unused_col),nb_unused_col);
and2_bis = vAND3(aa0, or7, cc0);
y = vAND3(and2, and0, and1);
vec_store(&m[i][0], y);
y = vAND3(and2_bis, and0_bis, and1_bis);
vec_store(&m[i][1], y);
b2 = vec_load(&img_bin_extra_lines[i+3][0]);
c2 = vec_load(&img_bin_extra_lines[i+3][1]);
a2 = _mm_bitshift_right(b2, 127);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
or0_0 = vOR3(aa2, b2, cc2);
or0 = vOR3(or0_0, or0_2, or0_1);
a2 = _mm_bitshift_left(c2, 127-nb_unused_col);
aa2 = vec_left1_bin(b2,c2);
cc2 = vec_right1_bin_unused_col(c2,a2,nb_unused_col);
or1_0 = vOR3(aa2, c2, cc2);
or1 = vOR3(or1_0, or1_2, or1_1);
aa0 = vec_left1_bin(_mm_bitshift_right(or0, 127),or0);
cc0 = vec_right1_bin(or0,or1);
and0 = vAND3(aa0, or0, cc0);
aa0 = vec_left1_bin(or0,or1);
cc0 = vec_right1_bin_unused_col(or1, _mm_bitshift_left(or1, 127-nb_unused_col),nb_unused_col);
and0_bis = vAND3(aa0, or1, cc0);
y = vAND3(and0, and2, and1);
vec_store(&m[i+1][0], y);
y = vAND3(and0_bis, and2_bis, and1_bis);
vec_store(&m[i+1][1], y);
b2 = vec_load(&img_bin_extra_lines[i+4][0]);
c2 = vec_load(&img_bin_extra_lines[i+4][1]);
a2 = _mm_bitshift_right(b2, 127);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
or0_1 = vOR3(aa2, b2, cc2);
or3 = vOR3(or0_1, or0_0, or0_2);
a2 = _mm_bitshift_left(c2, 127-nb_unused_col);
aa2 = vec_left1_bin(b2,c2);
cc2 = vec_right1_bin_unused_col(c2,a2,nb_unused_col);
or1_1 = vOR3(aa2, c2, cc2);
or4 = vOR3(or1_1, or1_0, or1_2);
aa0 = vec_left1_bin(_mm_bitshift_right(or3, 127),or3);
cc0 = vec_right1_bin(or3,or4);
and1 = vAND3(aa0, or3, cc0);
aa0 = vec_left1_bin(or3,or4);
cc0 = vec_right1_bin_unused_col(or4, _mm_bitshift_left(or4, 127-nb_unused_col),nb_unused_col);
and1_bis = vAND3(aa0, or4, cc0);
y = vAND3(and1, and0, and2);
vec_store(&m[i+2][0], y);
y = vAND3(and1_bis, and0_bis, and2_bis);
vec_store(&m[i+2][1], y);
}
switch(n){
case 2 :
or6 = vOR3(or0_1, or0_1, or0_0);
or7 = vOR3(or1_1, or1_1, or1_0);
aa0 = vec_left1_bin(_mm_bitshift_right(or6, 127),or6);
cc0 = vec_right1_bin(or6,or7);
and2 = vAND3(aa0, or6, cc0);
aa0 = vec_left1_bin(or6,or7);
cc0 = vec_right1_bin_unused_col(or7, _mm_bitshift_left(or7, 127-nb_unused_col),nb_unused_col);
and2_bis = vAND3(aa0, or7, cc0);
y = vAND3(and2, and0, and1);
vec_store(&m[height-2][0], y);
y = vAND3(and2_bis, and0_bis, and1_bis);
vec_store(&m[height-2][1], y);
y = vAND3(and1, and2, and2);
vec_store(&m[height-1][0], y);
y = vAND3(and1_bis, and2_bis, and2_bis);
vec_store(&m[height-1][1], y);
break;
case 1 :
y = vAND3(and0, and1, and1);
vec_store(&m[height-1][0], y);
y = vAND3(and0_bis, and1_bis, and1_bis);
vec_store(&m[height-1][1], y);
break;
case 0 :
b2 = vec_load(&img_bin_extra_lines[height-1][0]);
c2 = vec_load(&img_bin_extra_lines[height-1][1]);
a2 = _mm_bitshift_right(b2, 127);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
or0_2 = vOR3(aa2, b2, cc2);
or6 = vOR3(or0_0, or0_1, or0_0);
a2 = _mm_bitshift_left(c2, 127-nb_unused_col);
aa2 = vec_left1_bin(b2,c2);
cc2 = vec_right1_bin_unused_col(c2,a2,nb_unused_col);
or1_2 = vOR3(aa2, c2, cc2);
or7 = vOR3(or1_2, or1_1, or1_0);
aa0 = vec_left1_bin(_mm_bitshift_right(or6, 127),or6);
cc0 = vec_right1_bin(or6,or7);
and2 = vAND3(aa0, or6, cc0);
aa0 = vec_left1_bin(or6,or7);
cc0 = vec_right1_bin_unused_col(or7, _mm_bitshift_left(or7, 127-nb_unused_col),nb_unused_col);
and2_bis = vAND3(aa0, or7, cc0);
y = vAND3(and2, and0, and1);
vec_store(&m[height-3][0], y);
y = vAND3(and2_bis, and0_bis, and1_bis);
vec_store(&m[height-3][1], y);
or0 = vOR3(or0_2, or0_2, or0_1);
or1 = vOR3(or1_2, or1_2, or1_1);
aa0 = vec_left1_bin(_mm_bitshift_right(or0, 127),or0);
cc0 = vec_right1_bin(or0,or1);
and0 = vAND3(aa0, or0, cc0);
aa0 = vec_left1_bin(or0,or1);
cc0 = vec_right1_bin_unused_col(or1, _mm_bitshift_left(or1, 127-nb_unused_col),nb_unused_col);
and0_bis = vAND3(aa0, or1, cc0);
y = vAND3(and0, and2, and1);
vec_store(&m[height-2][0], y);
y = vAND3(and0_bis, and2_bis, and1_bis);
vec_store(&m[height-2][1], y);
y = vAND3(and2, and0, and0);
vec_store(&m[height-1][0], y);
y = vAND3(and2_bis, and0_bis, and0_bis);
vec_store(&m[height-1][1], y);
break;
default:
break;
}
_mm_free(img_bin_extra_lines);
return m;
}
// CAS DES IMAGES ENTRE 2*128+1 ET 3*128 COLONNES
if(nb_vbits_col == 3){
b0 = vec_load(&img_bin_extra_lines[0][0]);
b1 = vec_load(&img_bin_extra_lines[1][0]);
c0 = vec_load(&img_bin_extra_lines[0][1]);
c1 = vec_load(&img_bin_extra_lines[1][1]);
a0 = _mm_bitshift_right(b0, 127);
a1 = _mm_bitshift_right(b1, 127);
aa0 = vec_left1_bin(a0,b0);
aa1 = vec_left1_bin(a1,b1);
cc0 = vec_right1_bin(b0,c0);
cc1 = vec_right1_bin(b1,c1);
or0_0 = vOR3(aa0, b0, cc0);
or0_1 = vOR3(aa1, b1, cc1);
or0 = vOR3(or0_0, or0_0, or0_1);
or3 = or0;
a0 = vec_load(&img_bin_extra_lines[0][2]);
a1 = vec_load(&img_bin_extra_lines[1][2]);
aa0 = vec_left1_bin(b0,c0);
aa1 = vec_left1_bin(b1,c1);
cc0 = vec_right1_bin(c0,a0);
cc1 = vec_right1_bin(c1,a1);
or1_0 = vOR3(aa0, c0, cc0);
or1_1 = vOR3(aa1, c1, cc1);
or1 = vOR3(or1_0, or1_0, or1_1);
or4 = or1;
b0 = _mm_bitshift_left(a0, 127-nb_unused_col);
b1 = _mm_bitshift_left(a1, 127-nb_unused_col);
aa0 = vec_left1_bin(c0,a0);
aa1 = vec_left1_bin(c1,a1);
cc0 = vec_right1_bin_unused_col(a0, b0, nb_unused_col);
cc1 = vec_right1_bin_unused_col(a1, b1, nb_unused_col);
or2_0 = vOR3(aa0, a0, cc0);
or2_1 = vOR3(aa1, a1, cc1);
or2 = vOR3(or2_0, or2_0, or2_1);
or5 = or2;
aa0 = vec_left1_bin(_mm_bitshift_right(or0, 127),or0);
cc0 = vec_right1_bin(or0,or1);
and0 = vAND3(aa0, or0, cc0);
aa0 = vec_left1_bin(or0,or1);
cc0 = vec_right1_bin(or1,or2);
and0_bis = vAND3(aa0, or1, cc0);
aa0 = vec_left1_bin(or1,or2);
cc0 = vec_right1_bin_unused_col(or2, _mm_bitshift_left(or2, 127-nb_unused_col),nb_unused_col);
and0_ter = vAND3(aa0, or2, cc0);
and1 = and0;
and1_bis = and0_bis;
and1_ter = and0_ter;
for(int i = 0; i < height-3; i+=3){
b2 = vec_load(&img_bin_extra_lines[i+2][0]);
c2 = vec_load(&img_bin_extra_lines[i+2][1]);
a2 = _mm_bitshift_right(b2, 127);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
or0_2 = vOR3(aa2, b2, cc2);
or6 = vOR3(or0_2, or0_1, or0_0);
a2 = vec_load(&img_bin_extra_lines[i+2][2]);
aa2 = vec_left1_bin(b2,c2);
cc2 = vec_right1_bin(c2,a2);
or1_2 = vOR3(aa2, c2, cc2);
or7 = vOR3(or1_2, or1_1, or1_0);
b2 = _mm_bitshift_left(a2, 127-nb_unused_col);
aa2 = vec_left1_bin(c2,a2);
cc2 = vec_right1_bin_unused_col(a2,b2,nb_unused_col);
or2_2 = vOR3(aa2, a2, cc2);
or8 = vOR3(or2_2, or2_1, or2_0);
aa0 = vec_left1_bin(_mm_bitshift_right(or6, 127),or6);
cc0 = vec_right1_bin(or6,or7);
and2 = vAND3(aa0, or6, cc0);
aa0 = vec_left1_bin(or6,or7);
cc0 = vec_right1_bin(or7,or8);
and2_bis = vAND3(aa0, or7, cc0);
aa0 = vec_left1_bin(or7,or8);
cc0 = vec_right1_bin_unused_col(or8, _mm_bitshift_left(or8, 127-nb_unused_col),nb_unused_col);
and2_ter = vAND3(aa0, or8, cc0);
y = vAND3(and0, and1, and2);
vec_store(&m[i][0], y);
y = vAND3(and0_bis, and1_bis, and2_bis);
vec_store(&m[i][1], y);
y = vAND3(and0_ter, and1_ter, and2_ter);
vec_store(&m[i][2], y);
b2 = vec_load(&img_bin_extra_lines[i+3][0]);
c2 = vec_load(&img_bin_extra_lines[i+3][1]);
a2 = _mm_bitshift_right(b2, 127);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
or0_0 = vOR3(aa2, b2, cc2);
or0 = vOR3(or0_0, or0_2, or0_1);
a2 = vec_load(&img_bin_extra_lines[i+3][2]);
aa2 = vec_left1_bin(b2,c2);
cc2 = vec_right1_bin(c2,a2);
or1_0 = vOR3(aa2, c2, cc2);
or1 = vOR3(or1_0, or1_2, or1_1);
b2 = _mm_bitshift_left(a2, 127-nb_unused_col);
aa2 = vec_left1_bin(c2,a2);
cc2 = vec_right1_bin_unused_col(a2,b2,nb_unused_col);
or2_0 = vOR3(aa2, a2, cc2);
or2 = vOR3(or2_0, or2_2, or2_1);
aa0 = vec_left1_bin(_mm_bitshift_right(or0, 127),or0);
cc0 = vec_right1_bin(or0,or1);
and0 = vAND3(aa0, or0, cc0);
aa0 = vec_left1_bin(or0,or1);
cc0 = vec_right1_bin(or1,or2);
and0_bis = vAND3(aa0, or1, cc0);
aa0 = vec_left1_bin(or1,or2);
cc0 = vec_right1_bin_unused_col(or2, _mm_bitshift_left(or2, 127-nb_unused_col),nb_unused_col);
and0_ter = vAND3(aa0, or2, cc0);
y = vAND3(and0, and2, and1);
vec_store(&m[i+1][0], y);
y = vAND3(and0_bis, and2_bis, and1_bis);
vec_store(&m[i+1][1], y);
y = vAND3(and0_ter, and2_ter, and1_ter);
vec_store(&m[i+1][2], y);
b2 = vec_load(&img_bin_extra_lines[i+4][0]);
c2 = vec_load(&img_bin_extra_lines[i+4][1]);
a2 = _mm_bitshift_right(b2, 127);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
or0_1 = vOR3(aa2, b2, cc2);
or3 = vOR3(or0_1, or0_0, or0_2);
a2 = vec_load(&img_bin_extra_lines[i+4][2]);
aa2 = vec_left1_bin(b2,c2);
cc2 = vec_right1_bin(c2,a2);
or1_1 = vOR3(aa2, c2, cc2);
or4 = vOR3(or1_1, or1_0, or1_2);
b2 = _mm_bitshift_left(a2, 127-nb_unused_col);
aa2 = vec_left1_bin(c2,a2);
cc2 = vec_right1_bin_unused_col(a2,b2,nb_unused_col);
or2_1 = vOR3(aa2, a2, cc2);
or5 = vOR3(or2_1, or2_0, or2_2);
aa0 = vec_left1_bin(_mm_bitshift_right(or3, 127),or3);
cc0 = vec_right1_bin(or3,or4);
and1 = vAND3(aa0, or3, cc0);
aa0 = vec_left1_bin(or3,or4);
cc0 = vec_right1_bin(or4,or5);
and1_bis = vAND3(aa0, or4, cc0);
aa0 = vec_left1_bin(or4,or5);
cc0 = vec_right1_bin_unused_col(or5, _mm_bitshift_left(or5, 127-nb_unused_col),nb_unused_col);
and1_ter = vAND3(aa0, or5, cc0);
y = vAND3(and1, and0, and2);
vec_store(&m[i+2][0], y);
y = vAND3(and1_bis, and0_bis, and2_bis);
vec_store(&m[i+2][1], y);
y = vAND3(and1_ter, and0_ter, and2_ter);
vec_store(&m[i+2][2], y);
}
switch (n) {
case 2:
or6 = vOR3(or0_1, or0_1, or0_0);
or7 = vOR3(or1_1, or1_1, or1_0);
or8 = vOR3(or2_1, or2_1, or2_0);
aa0 = vec_left1_bin(_mm_bitshift_right(or6, 127),or6);
cc0 = vec_right1_bin(or6,or7);
and2 = vAND3(aa0, or6, cc0);
aa0 = vec_left1_bin(or6,or7);
cc0 = vec_right1_bin(or7,or8);
and2_bis = vAND3(aa0, or7, cc0);
aa0 = vec_left1_bin(or7,or8);
cc0 = vec_right1_bin_unused_col(or8, _mm_bitshift_left(or8, 127-nb_unused_col),nb_unused_col);
and2_ter = vAND3(aa0, or8, cc0);
y = vAND3(and0, and1, and2);
vec_store(&m[height-2][0], y);
y = vAND3(and0_bis, and1_bis, and2_bis);
vec_store(&m[height-2][1], y);
y = vAND3(and0_ter, and1_ter, and2_ter);
vec_store(&m[height-2][2], y);
y = vAND3(and1, and2, and2);
vec_store(&m[height-1][0], y);
y = vAND3(and1_bis, and2_bis, and2_bis);
vec_store(&m[height-1][1], y);
y = vAND3(and1_ter, and2_ter, and2_ter);
vec_store(&m[height-1][2], y);
break;
case 1 :
y = vAND3(and0, and1, and1);
vec_store(&m[height-1][0], y);
y = vAND3(and0_bis, and1_bis, and1_bis);
vec_store(&m[height-1][1], y);
y = vAND3(and0_ter, and1_ter, and1_ter);
vec_store(&m[height-1][2], y);
break;
case 0 :
b2 = vec_load(&img_bin_extra_lines[height-1][0]);
c2 = vec_load(&img_bin_extra_lines[height-1][1]);
a2 = _mm_bitshift_right(b2, 127);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
or0_2 = vOR3(aa2, b2, cc2);
or6 = vOR3(or0_0, or0_2, or0_1);
a2 = vec_load(&img_bin_extra_lines[height-1][2]);
aa2 = vec_left1_bin(b2,c2);
cc2 = vec_right1_bin(c2,a2);
or1_2 = vOR3(aa2, c2, cc2);
or7 = vOR3(or1_0, or1_2, or1_1);
b2 = _mm_bitshift_left(a2, 127-nb_unused_col);
aa2 = vec_left1_bin(c2,a2);
cc2 = vec_right1_bin_unused_col(a2,b2,nb_unused_col);
or2_2 = vOR3(aa2, a2, cc2);
or8 = vOR3(or2_0, or2_2, or2_1);
aa0 = vec_left1_bin(_mm_bitshift_right(or6, 127),or6);
cc0 = vec_right1_bin(or6,or7);
and2 = vAND3(aa0, or6, cc0);
aa0 = vec_left1_bin(or6,or7);
cc0 = vec_right1_bin(or7,or8);
and2_bis = vAND3(aa0, or7, cc0);
aa0 = vec_left1_bin(or7,or8);
cc0 = vec_right1_bin_unused_col(or8, _mm_bitshift_left(or8, 127-nb_unused_col),nb_unused_col);
and2_ter = vAND3(aa0, or8, cc0);
y = vAND3(and0, and1, and2);
vec_store(&m[height-3][0], y);
y = vAND3(and0_bis, and1_bis, and2_bis);
vec_store(&m[height-3][1], y);
y = vAND3(and0_ter, and1_ter, and2_ter);
vec_store(&m[height-3][2], y);
or0 = vOR3(or0_2, or0_2, or0_1);
or1 = vOR3(or1_2, or1_2, or1_1);
or2 = vOR3(or2_2, or2_2, or2_1);
aa0 = vec_left1_bin(_mm_bitshift_right(or0, 127),or0);
cc0 = vec_right1_bin(or0,or1);
and0 = vAND3(aa0, or0, cc0);
aa0 = vec_left1_bin(or0,or1);
cc0 = vec_right1_bin(or1,or2);
and0_bis = vAND3(aa0, or1, cc0);
aa0 = vec_left1_bin(or1,or2);
cc0 = vec_right1_bin_unused_col(or2, _mm_bitshift_left(or2, 127-nb_unused_col),nb_unused_col);
and0_ter = vAND3(aa0, or2, cc0);
y = vAND3(and0, and2, and1);
vec_store(&m[height-2][0], y);
y = vAND3(and0_bis, and2_bis, and1_bis);
vec_store(&m[height-2][1], y);
y = vAND3(and0_ter, and2_ter, and1_ter);
vec_store(&m[height-2][2], y);
y = vAND3(and2, and0, and0);
vec_store(&m[height-1][0], y);
y = vAND3(and2_bis, and0_bis, and0_bis);
vec_store(&m[height-1][1], y);
y = vAND3(and2_ter, and0_ter, and0_ter);
vec_store(&m[height-1][2], y);
break;
}
_mm_free(img_bin_extra_lines);
return m;
}
// CAS DES IMAGES DE 3*128+1 COLONNES OU PLUS (nb_vbits_col >= 4)
// Première et deuxième colonne
b0 = vec_load(&img_bin_extra_lines[0][0]);
b1 = vec_load(&img_bin_extra_lines[1][0]);
c0 = vec_load(&img_bin_extra_lines[0][1]);
c1 = vec_load(&img_bin_extra_lines[1][1]);
a0 = _mm_bitshift_right(b0, 127);
a1 = _mm_bitshift_right(b1, 127);
aa0 = vec_left1_bin(a0,b0);
aa1 = vec_left1_bin(a1,b1);
cc0 = vec_right1_bin(b0,c0);
cc1 = vec_right1_bin(b1,c1);
or0_0 = vOR3(aa0, b0, cc0);
or0_1 = vOR3(aa1, b1, cc1);
or0 = vOR3(or0_0, or0_0, or0_1);
or3 = or0;
a0 = vec_load(&img_bin_extra_lines[0][2]);
a1 = vec_load(&img_bin_extra_lines[1][2]);
aa0 = vec_left1_bin(b0,c0);
aa1 = vec_left1_bin(b1,c1);
cc0 = vec_right1_bin(c0,a0);
cc1 = vec_right1_bin(c1,a1);
or1_0 = vOR3(aa0, c0, cc0);
or1_1 = vOR3(aa1, c1, cc1);
or1 = vOR3(or1_0, or1_0, or1_1);
or4 = or1;
b0 = vec_load(&img_bin_extra_lines[0][3]);
b1 = vec_load(&img_bin_extra_lines[1][3]);
aa0 = vec_left1_bin(c0,a0);
aa1 = vec_left1_bin(c1,a1);
cc0 = vec_right1_bin(a0,b0);
cc1 = vec_right1_bin(a1,b1);
or2_0 = vOR3(aa0, a0, cc0);
or2_1 = vOR3(aa1, a1, cc1);
or2 = vOR3(or2_0, or2_0, or2_1);
or5 = or2;
aa0 = vec_left1_bin(_mm_bitshift_right(or0, 127),or0);
cc0 = vec_right1_bin(or0,or1);
and0 = vAND3(aa0, or0, cc0);
aa0 = vec_left1_bin(or0,or1);
cc0 = vec_right1_bin(or1,or2);
and0_bis = vAND3(aa0, or1, cc0);
and1 = and0;
and1_bis = and0_bis;
for(int i = 0; i < height-3; i+=3){
b2 = vec_load(&img_bin_extra_lines[i+2][0]);
c2 = vec_load(&img_bin_extra_lines[i+2][1]);
a2 = _mm_bitshift_right(b2, 127);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
or0_2 = vOR3(aa2, b2, cc2);
or6 = vOR3(or0_2, or0_1, or0_0);
a2 = vec_load(&img_bin_extra_lines[i+2][2]);
aa2 = vec_left1_bin(b2,c2);
cc2 = vec_right1_bin(c2,a2);
or1_2 = vOR3(aa2, c2, cc2);
or7 = vOR3(or1_2, or1_1, or1_0);
b2 = vec_load(&img_bin_extra_lines[i+2][3]);
aa2 = vec_left1_bin(c2,a2);
cc2 = vec_right1_bin(a2,b2);
or2_2 = vOR3(aa2, a2, cc2);
or8 = vOR3(or2_2, or2_1, or2_0);
aa0 = vec_left1_bin(_mm_bitshift_right(or6, 127),or6);
cc0 = vec_right1_bin(or6,or7);
and2 = vAND3(aa0, or6, cc0);
aa0 = vec_left1_bin(or6,or7);
cc0 = vec_right1_bin(or7,or8);
and2_bis = vAND3(aa0, or7, cc0);
y = vAND3(and0, and1, and2);
vec_store(&m[i][0], y);
y = vAND3(and0_bis, and1_bis, and2_bis);
vec_store(&m[i][1], y);
b2 = vec_load(&img_bin_extra_lines[i+3][0]);
c2 = vec_load(&img_bin_extra_lines[i+3][1]);
a2 = _mm_bitshift_right(b2, 127);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
or0_0 = vOR3(aa2, b2, cc2);
or0 = vOR3(or0_0, or0_2, or0_1);
a2 = vec_load(&img_bin_extra_lines[i+3][2]);
aa2 = vec_left1_bin(b2,c2);
cc2 = vec_right1_bin(c2,a2);
or1_0 = vOR3(aa2, c2, cc2);
or1 = vOR3(or1_0, or1_2, or1_1);
b2 = vec_load(&img_bin_extra_lines[i+3][3]);
aa2 = vec_left1_bin(c2,a2);
cc2 = vec_right1_bin(a2,b2);
or2_0 = vOR3(aa2, a2, cc2);
or2 = vOR3(or2_0, or2_2, or2_1);
aa0 = vec_left1_bin(_mm_bitshift_right(or0, 127),or0);
cc0 = vec_right1_bin(or0,or1);
and0 = vAND3(aa0, or0, cc0);
aa0 = vec_left1_bin(or0,or1);
cc0 = vec_right1_bin(or1,or2);
and0_bis = vAND3(aa0, or1, cc0);
y = vAND3(and0, and1, and2);
vec_store(&m[i+1][0], y);
y = vAND3(and0_bis, and1_bis, and2_bis);
vec_store(&m[i+1][1], y);
b2 = vec_load(&img_bin_extra_lines[i+4][0]);
c2 = vec_load(&img_bin_extra_lines[i+4][1]);
a2 = _mm_bitshift_right(b2, 127);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
or0_1 = vOR3(aa2, b2, cc2);
or3 = vOR3(or0_1, or0_0, or0_2);
a2 = vec_load(&img_bin_extra_lines[i+4][2]);
aa2 = vec_left1_bin(b2,c2);
cc2 = vec_right1_bin(c2,a2);
or1_1 = vOR3(aa2, c2, cc2);
or4 = vOR3(or1_1, or1_0, or1_2);
b2 = vec_load(&img_bin_extra_lines[i+4][3]);
aa2 = vec_left1_bin(c2,a2);
cc2 = vec_right1_bin(a2,b2);
or2_1 = vOR3(aa2, a2, cc2);
or5 = vOR3(or2_1, or2_0, or2_2);
aa0 = vec_left1_bin(_mm_bitshift_right(or3, 127),or3);
cc0 = vec_right1_bin(or3,or4);
and1 = vAND3(aa0, or3, cc0);
aa0 = vec_left1_bin(or3,or4);
cc0 = vec_right1_bin(or4,or5);
and1_bis = vAND3(aa0, or4, cc0);
y = vAND3(and0, and1, and2);
vec_store(&m[i+2][0], y);
y = vAND3(and0_bis, and1_bis, and2_bis);
vec_store(&m[i+2][1], y);
}
switch(n) {
case 2 :
or6 = vOR3(or0_1, or0_1, or0_0);
or7 = vOR3(or1_1, or1_1, or1_0);
or8 = vOR3(or2_1, or2_1, or2_0);
aa0 = vec_left1_bin(_mm_bitshift_right(or6, 127),or6);
cc0 = vec_right1_bin(or6,or7);
and2 = vAND3(aa0, or6, cc0);
aa0 = vec_left1_bin(or6,or7);
cc0 = vec_right1_bin(or7,or8);
and2_bis = vAND3(aa0, or7, cc0);
y = vAND3(and0, and1, and2);
vec_store(&m[height-2][0], y);
y = vAND3(and0_bis, and1_bis, and2_bis);
vec_store(&m[height-2][1], y);
y = vAND3(and1, and2, and2);
vec_store(&m[height-1][0], y);
y = vAND3(and1_bis, and2_bis, and2_bis);
vec_store(&m[height-1][1], y);
break;
case 1 :
y = vAND3(and0, and1, and1);
vec_store(&m[height-1][0], y);
y = vAND3(and0_bis, and1_bis, and1_bis);
vec_store(&m[height-1][1], y);
break;
case 0:
b2 = vec_load(&img_bin_extra_lines[height-1][0]);
c2 = vec_load(&img_bin_extra_lines[height-1][1]);
a2 = _mm_bitshift_right(b2, 127);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
or0_2 = vOR3(aa2, b2, cc2);
or6 = vOR3(or0_2, or0_1, or0_0);
a2 = vec_load(&img_bin_extra_lines[height-1][2]);
aa2 = vec_left1_bin(b2,c2);
cc2 = vec_right1_bin(c2,a2);
or1_2 = vOR3(aa2, c2, cc2);
or7 = vOR3(or1_2, or1_1, or1_0);
b2 = vec_load(&img_bin_extra_lines[height-1][3]);
aa2 = vec_left1_bin(c2,a2);
cc2 = vec_right1_bin(a2,b2);
or2_2 = vOR3(aa2, a2, cc2);
or8 = vOR3(or2_2, or2_1, or2_0);
aa0 = vec_left1_bin(_mm_bitshift_right(or6, 127),or6);
cc0 = vec_right1_bin(or6,or7);
and2 = vAND3(aa0, or6, cc0);
aa0 = vec_left1_bin(or6,or7);
cc0 = vec_right1_bin(or7,or8);
and2_bis = vAND3(aa0, or7, cc0);
y = vAND3(and0, and1, and2);
vec_store(&m[height-3][0], y);
y = vAND3(and0_bis, and1_bis, and2_bis);
vec_store(&m[height-3][1], y);
or0 = vOR3(or0_2, or0_2, or0_1);
or1 = vOR3(or1_2, or1_2, or1_1);
or2 = vOR3(or2_2, or2_2, or2_1);
aa0 = vec_left1_bin(_mm_bitshift_right(or0, 127),or0);
cc0 = vec_right1_bin(or0,or1);
and0 = vAND3(aa0, or0, cc0);
aa0 = vec_left1_bin(or0,or1);
cc0 = vec_right1_bin(or1,or2);
and0_bis = vAND3(aa0, or1, cc0);
y = vAND3(and0, and1, and2);
vec_store(&m[height-2][0], y);
y = vAND3(and0_bis, and1_bis, and2_bis);
vec_store(&m[height-2][1], y);
y = vAND3(and2, and0, and0);
vec_store(&m[height-1][0], y);
y = vAND3(and2_bis, and0_bis, and0_bis);
vec_store(&m[height-1][1], y);
break;
default:
break;
}
for(int j = 2; j < nb_vbits_col-2; j++){
a0 = vec_load(&img_bin_extra_lines[0][j-2]);
a1 = vec_load(&img_bin_extra_lines[1][j-2]);
b0 = vec_load(&img_bin_extra_lines[0][j-1]);
b1 = vec_load(&img_bin_extra_lines[1][j-1]);
c0 = vec_load(&img_bin_extra_lines[0][j-0]);
c1 = vec_load(&img_bin_extra_lines[1][j-0]);
aa0 = vec_left1_bin(a0,b0);
aa1 = vec_left1_bin(a1,b1);
cc0 = vec_right1_bin(b0,c0);
cc1 = vec_right1_bin(b1,c1);
or0_0 = vOR3(aa0, b0, cc0);
or0_1 = vOR3(aa1, b1, cc1);
or0 = vOR3(or0_0, or0_0, or0_1);
or3 = or0;
a0 = vec_load(&img_bin_extra_lines[0][j+1]);
a1 = vec_load(&img_bin_extra_lines[1][j+1]);
aa0 = vec_left1_bin(b0,c0);
aa1 = vec_left1_bin(b1,c1);
cc0 = vec_right1_bin(c0,a0);
cc1 = vec_right1_bin(c1,a1);
or1_0 = vOR3(aa0, c0, cc0);
or1_1 = vOR3(aa1, c1, cc1);
or1 = vOR3(or1_0, or1_0, or1_1);
or4 = or1;
b0 = vec_load(&img_bin_extra_lines[0][j+2]);
b1 = vec_load(&img_bin_extra_lines[1][j+2]);
aa0 = vec_left1_bin(c0,a0);
aa1 = vec_left1_bin(c1,a1);
cc0 = vec_right1_bin(a0,b0);
cc1 = vec_right1_bin(a1,b1);
or2_0 = vOR3(aa0, a0, cc0);
or2_1 = vOR3(aa1, a1, cc1);
or2 = vOR3(or2_0, or2_0, or2_1);
or5 = or2;
aa0 = vec_left1_bin(or0, or1);
cc0 = vec_right1_bin(or1, or2);
and0 = vAND3(aa0, or1, cc0);
and1 = and0;
for(int i = 0; i < height-3; i+=3){
a2 = vec_load(&img_bin_extra_lines[i+2][j-2]);
b2 = vec_load(&img_bin_extra_lines[i+2][j-1]);
c2 = vec_load(&img_bin_extra_lines[i+2][j-0]);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
or0_2 = vOR3(aa2, b2, cc2);
or6 = vOR3(or0_2, or0_1, or0_0);
a2 = vec_load(&img_bin_extra_lines[i+2][j+1]);
aa2 = vec_left1_bin(b2,c2);
cc2 = vec_right1_bin(c2,a2);
or1_2 = vOR3(aa2, c2, cc2);
or7 = vOR3(or1_2, or1_1, or1_0);
b2 = vec_load(&img_bin_extra_lines[i+2][j+2]);
aa2 = vec_left1_bin(c2,a2);
cc2 = vec_right1_bin(a2,b2);
or2_2 = vOR3(aa2, a2, cc2);
or8 = vOR3(or2_2, or2_1, or2_0);
aa2 = vec_left1_bin(or6, or7);
cc2 = vec_right1_bin(or7, or8);
and2 = vAND3(aa2, or7, cc2);
y = vAND3(and0, and1, and2);
vec_store(&m[i][j], y);
a2 = vec_load(&img_bin_extra_lines[i+3][j-2]);
b2 = vec_load(&img_bin_extra_lines[i+3][j-1]);
c2 = vec_load(&img_bin_extra_lines[i+3][j-0]);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
or0_0 = vOR3(aa2, b2, cc2);
or0 = vOR3(or0_0, or0_2, or0_1);
a2 = vec_load(&img_bin_extra_lines[i+3][j+1]);
aa2 = vec_left1_bin(b2,c2);
cc2 = vec_right1_bin(c2,a2);
or1_0 = vOR3(aa2, c2, cc2);
or1 = vOR3(or1_0, or1_2, or1_1);
b2 = vec_load(&img_bin_extra_lines[i+3][j+2]);
aa2 = vec_left1_bin(c2,a2);
cc2 = vec_right1_bin(a2,b2);
or2_0 = vOR3(aa2, a2, cc2);
or2 = vOR3(or2_0, or2_2, or2_1);
aa2 = vec_left1_bin(or0, or1);
cc2 = vec_right1_bin(or1, or2);
and0 = vAND3(aa2, or1, cc2);
y = vAND3(and0, and1, and2);
vec_store(&m[i+1][j], y);
a2 = vec_load(&img_bin_extra_lines[i+4][j-2]);
b2 = vec_load(&img_bin_extra_lines[i+4][j-1]);
c2 = vec_load(&img_bin_extra_lines[i+4][j-0]);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
or0_1 = vOR3(aa2, b2, cc2);
or3 = vOR3(or0_1, or0_0, or0_2);
a2 = vec_load(&img_bin_extra_lines[i+4][j+1]);
aa2 = vec_left1_bin(b2,c2);
cc2 = vec_right1_bin(c2,a2);
or1_1 = vOR3(aa2, c2, cc2);
or4 = vOR3(or1_1, or1_0, or1_2);
b2 = vec_load(&img_bin_extra_lines[i+4][j+2]);
aa2 = vec_left1_bin(c2,a2);
cc2 = vec_right1_bin(a2,b2);
or2_1 = vOR3(aa2, a2, cc2);
or5 = vOR3(or2_1, or2_0, or2_2);
aa2 = vec_left1_bin(or3, or4);
cc2 = vec_right1_bin(or4, or5);
and1 = vAND3(aa2, or4, cc2);
y = vAND3(and0, and1, and2);
vec_store(&m[i+2][j], y);
}
switch(n){
case 2:
or6 = vOR3(or0_1, or0_1, or0_0);
or7 = vOR3(or1_1, or1_1, or1_0);
or8 = vOR3(or2_1, or2_1, or2_0);
aa2 = vec_left1_bin(or6, or7);
cc2 = vec_right1_bin(or7, or8);
and2 = vAND3(aa2, or7, cc2);
y = vAND3(and0, and1, and2);
vec_store(&m[height-2][j], y);
y = vAND3(and1, and2, and2);
vec_store(&m[height-1][j], y);
break;
case 1:
y = vAND3(and0, and1, and1);
vec_store(&m[height-1][j], y);
break;
case 0:
a2 = vec_load(&img_bin_extra_lines[height-1][j-2]);
b2 = vec_load(&img_bin_extra_lines[height-1][j-1]);
c2 = vec_load(&img_bin_extra_lines[height-1][j-0]);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin(b2,c2);
or0_2 = vOR3(aa2, b2, cc2);
or6 = vOR3(or0_2, or0_1, or0_0);
a2 = vec_load(&img_bin_extra_lines[height-1][j+1]);
aa2 = vec_left1_bin(b2,c2);
cc2 = vec_right1_bin(c2,a2);
or1_2 = vOR3(aa2, c2, cc2);
or7 = vOR3(or1_2, or1_1, or1_0);
b2 = vec_load(&img_bin_extra_lines[height-1][j+2]);
aa2 = vec_left1_bin(c2,a2);
cc2 = vec_right1_bin(a2,b2);
or2_2 = vOR3(aa2, a2, cc2);
or8 = vOR3(or2_2, or2_1, or2_0);
aa2 = vec_left1_bin(or6, or7);
cc2 = vec_right1_bin(or7, or8);
and2 = vAND3(aa2, or7, cc2);
y = vAND3(and0, and1, and2);
vec_store(&m[height-3][j], y);
or0 = vOR3(or0_2, or0_2, or0_1);
or1 = vOR3(or1_2, or1_2, or1_1);
or2 = vOR3(or2_2, or2_2, or2_1);
aa2 = vec_left1_bin(or0, or1);
cc2 = vec_right1_bin(or1, or2);
and0 = vAND3(aa2, or1, cc2);
y = vAND3(and0, and1, and2);
vec_store(&m[height-2][j], y);
y = vAND3(and2, and0, and0);
vec_store(&m[height-1][j], y);
break;
default:
break;
}
}
// Avant dernière et dernière colonne
// NOTE(review): dans la boucle ci-dessous, les chargements des lignes i+3 et
// i+4 relisent img_bin_extra_lines[i+2][nb_vbits_col-3/-4] au lieu de
// [i+3]/[i+4] (cf. le motif des colonnes précédentes) — copier-coller suspect,
// à vérifier.
b0 = vec_load(&img_bin_extra_lines[0][nb_vbits_col-1]);
b1 = vec_load(&img_bin_extra_lines[1][nb_vbits_col-1]);
a0 = vec_load(&img_bin_extra_lines[0][nb_vbits_col-2]);
a1 = vec_load(&img_bin_extra_lines[1][nb_vbits_col-2]);
c0 = _mm_bitshift_left(b0, 127-nb_unused_col);
c1 = _mm_bitshift_left(b1, 127-nb_unused_col);
aa0 = vec_left1_bin(a0,b0);
aa1 = vec_left1_bin(a1,b1);
cc0 = vec_right1_bin_unused_col(b0,c0,nb_unused_col);
cc1 = vec_right1_bin_unused_col(b1,c1,nb_unused_col);
or0_0 = vOR3(aa0, b0, cc0);
or0_1 = vOR3(aa1, b1, cc1);
or0 = vOR3(or0_0, or0_0, or0_1);
or3 = or0;
c0 = vec_load(&img_bin_extra_lines[0][nb_vbits_col-3]);
c1 = vec_load(&img_bin_extra_lines[1][nb_vbits_col-3]);
aa0 = vec_left1_bin(c0,a0);
aa1 = vec_left1_bin(c1,a1);
cc0 = vec_right1_bin(a0,b0);
cc1 = vec_right1_bin(a1,b1);
or1_0 = vOR3(aa0, a0, cc0);
or1_1 = vOR3(aa1, a1, cc1);
or1 = vOR3(or1_0, or1_0, or1_1);
or4 = or1;
b0 = vec_load(&img_bin_extra_lines[0][nb_vbits_col-4]);
b1 = vec_load(&img_bin_extra_lines[1][nb_vbits_col-4]);
aa0 = vec_left1_bin(b0,c0);
aa1 = vec_left1_bin(b1,c1);
cc0 = vec_right1_bin(c0,a0);
cc1 = vec_right1_bin(c1,a1);
or2_0 = vOR3(aa0, c0, cc0);
or2_1 = vOR3(aa1, c1, cc1);
or2 = vOR3(or2_0, or2_0, or2_1);
or5 = or2;
aa0 = vec_left1_bin(or1,or0);
cc0 = vec_right1_bin_unused_col(or0, _mm_bitshift_left(or0, 127-nb_unused_col),nb_unused_col);
and0 = vAND3(aa0, or0, cc0);
aa0 = vec_left1_bin(or2,or1);
cc0 = vec_right1_bin(or1,or0);
and0_bis = vAND3(aa0, or1, cc0);
and1 = and0;
and1_bis = and0_bis;
for(int i = 0; i < height-3; i+=3){
a2 = vec_load(&img_bin_extra_lines[i+2][nb_vbits_col-2]);
b2 = vec_load(&img_bin_extra_lines[i+2][nb_vbits_col-1]);
c2 = _mm_bitshift_left(b2, 127-nb_unused_col);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin_unused_col(b2,c2,nb_unused_col);
or0_2 = vOR3(aa2, b2, cc2);
or6 = vOR3(or0_2, or0_1, or0_0);
c2 = vec_load(&img_bin_extra_lines[i+2][nb_vbits_col-3]);
aa2 = vec_left1_bin(c2,a2);
cc2 = vec_right1_bin(a2,b2);
or1_2 = vOR3(aa2, a2, cc2);
or7 = vOR3(or1_2, or1_1, or1_0);
b2 = vec_load(&img_bin_extra_lines[i+2][nb_vbits_col-4]);
aa2 = vec_left1_bin(b2,c2);
cc2 = vec_right1_bin(c2,a2);
or2_2 = vOR3(aa2, c2, cc2);
or8 = vOR3(or2_2, or2_1, or2_0);
aa0 = vec_left1_bin(or7,or6);
cc0 = vec_right1_bin_unused_col(or6, _mm_bitshift_left(or6, 127-nb_unused_col),nb_unused_col);
and2 = vAND3(aa0, or6, cc0);
aa0 = vec_left1_bin(or8,or7);
cc0 = vec_right1_bin(or7,or6);
and2_bis = vAND3(aa0, or7, cc0);
y = vAND3(and0, and1, and2);
vec_store(&m[i][nb_vbits_col-1], y);
y = vAND3(and0_bis, and1_bis, and2_bis);
vec_store(&m[i][nb_vbits_col-2], y);
a2 = vec_load(&img_bin_extra_lines[i+3][nb_vbits_col-2]);
b2 = vec_load(&img_bin_extra_lines[i+3][nb_vbits_col-1]);
c2 = _mm_bitshift_left(b2, 127-nb_unused_col);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin_unused_col(b2,c2,nb_unused_col);
or0_0 = vOR3(aa2, b2, cc2);
or0 = vOR3(or0_0, or0_2, or0_1);
c2 = vec_load(&img_bin_extra_lines[i+2][nb_vbits_col-3]);
aa2 = vec_left1_bin(c2,a2);
cc2 = vec_right1_bin(a2,b2);
or1_0 = vOR3(aa2, a2, cc2);
or1 = vOR3(or1_0, or1_2, or1_1);
b2 = vec_load(&img_bin_extra_lines[i+2][nb_vbits_col-4]);
aa2 = vec_left1_bin(b2,c2);
cc2 = vec_right1_bin(c2,a2);
or2_0 = vOR3(aa2, c2, cc2);
or2 = vOR3(or2_0, or2_2, or2_1);
aa0 = vec_left1_bin(or1,or0);
cc0 = vec_right1_bin_unused_col(or0, _mm_bitshift_left(or0, 127-nb_unused_col),nb_unused_col);
and0 = vAND3(aa0, or0, cc0);
aa0 = vec_left1_bin(or2,or1);
cc0 = vec_right1_bin(or1,or0);
and0_bis = vAND3(aa0, or1, cc0);
y = vAND3(and0, and1, and2);
vec_store(&m[i+1][nb_vbits_col-1], y);
y = vAND3(and0_bis, and1_bis, and2_bis);
vec_store(&m[i+1][nb_vbits_col-2], y);
a2 = vec_load(&img_bin_extra_lines[i+4][nb_vbits_col-2]);
b2 = vec_load(&img_bin_extra_lines[i+4][nb_vbits_col-1]);
c2 = _mm_bitshift_left(b2, 127-nb_unused_col);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin_unused_col(b2,c2,nb_unused_col);
or0_1 = vOR3(aa2, b2, cc2);
or3 = vOR3(or0_1, or0_0, or0_2);
c2 = vec_load(&img_bin_extra_lines[i+2][nb_vbits_col-3]);
aa2 = vec_left1_bin(c2,a2);
cc2 = vec_right1_bin(a2,b2);
or1_1 = vOR3(aa2, a2, cc2);
or4 = vOR3(or1_1, or1_0, or1_2);
b2 = vec_load(&img_bin_extra_lines[i+2][nb_vbits_col-4]);
aa2 = vec_left1_bin(b2,c2);
cc2 = vec_right1_bin(c2,a2);
or2_1 = vOR3(aa2, c2, cc2);
or5 = vOR3(or2_1, or2_0, or2_2);
aa0 = vec_left1_bin(or4,or3);
cc0 = vec_right1_bin_unused_col(or3, _mm_bitshift_left(or3, 127-nb_unused_col),nb_unused_col);
and1 = vAND3(aa0, or3, cc0);
aa0 = vec_left1_bin(or5,or4);
cc0 = vec_right1_bin(or4,or3);
and1_bis = vAND3(aa0, or4, cc0);
y = vAND3(and0, and1, and2);
vec_store(&m[i+2][nb_vbits_col-1], y);
y = vAND3(and0_bis, and1_bis, and2_bis);
vec_store(&m[i+2][nb_vbits_col-2], y);
}
switch(n){
case 2:
or6 = vOR3(or0_1, or0_1, or0_0);
or7 = vOR3(or1_1, or1_1, or1_0);
or8 = vOR3(or2_1, or2_1, or2_0);
aa0 = vec_left1_bin(or7,or6);
cc0 = vec_right1_bin_unused_col(or6, _mm_bitshift_left(or6, 127-nb_unused_col),nb_unused_col);
and2 = vAND3(aa0, or6, cc0);
aa0 = vec_left1_bin(or8,or7);
cc0 = vec_right1_bin(or7,or6);
and2_bis = vAND3(aa0, or7, cc0);
y = vAND3(and0, and1, and2);
vec_store(&m[height-2][nb_vbits_col-1], y);
y = vAND3(and0_bis, and1_bis, and2_bis);
vec_store(&m[height-2][nb_vbits_col-2], y);
y = vAND3(and1, and2, and2);
vec_store(&m[height-1][nb_vbits_col-1], y);
y = vAND3(and1_bis, and2_bis, and2_bis);
vec_store(&m[height-1][nb_vbits_col-2], y);
break;
case 1:
y = vAND3(and0, and1, and1);
vec_store(&m[height-1][nb_vbits_col-1], y);
y = vAND3(and0_bis, and1_bis, and1_bis);
vec_store(&m[height-1][nb_vbits_col-2], y);
break;
case 0:
a2 = vec_load(&img_bin_extra_lines[height-1][nb_vbits_col-2]);
b2 = vec_load(&img_bin_extra_lines[height-1][nb_vbits_col-1]);
c2 = _mm_bitshift_left(b2, 127-nb_unused_col);
aa2 = vec_left1_bin(a2,b2);
cc2 = vec_right1_bin_unused_col(b2,c2,nb_unused_col);
or0_2 = vOR3(aa2, b2, cc2);
or6 = vOR3(or0_2, or0_1, or0_0);
c2 = vec_load(&img_bin_extra_lines[height-1][nb_vbits_col-3]);
aa2 = vec_left1_bin(c2,a2);
cc2 = vec_right1_bin(a2,b2);
or1_2 = vOR3(aa2, a2, cc2);
or7 = vOR3(or1_2, or1_1, or1_0);
b2 = vec_load(&img_bin_extra_lines[height-1][nb_vbits_col-4]);
aa2 = vec_left1_bin(b2,c2);
cc2 = vec_right1_bin(c2,a2);
or2_2 = vOR3(aa2, c2, cc2);
or8 = vOR3(or2_2, or2_1, or2_0);
aa0 = vec_left1_bin(or7,or6);
cc0 = vec_right1_bin_unused_col(or6, _mm_bitshift_left(or6, 127-nb_unused_col),nb_unused_col);
and2 = vAND3(aa0, or6, cc0);
aa0 = vec_left1_bin(or8,or7);
cc0 = vec_right1_bin(or7,or6);
and2_bis = vAND3(aa0, or7, cc0);
y = vAND3(and0, and1, and2);
vec_store(&m[height-3][nb_vbits_col-1], y);
y = vAND3(and0_bis, and1_bis, and2_bis);
vec_store(&m[height-3][nb_vbits_col-2], y);
or0 = vOR3(or0_2, or0_2, or0_1);
or1 = vOR3(or1_2, or1_2, or1_1);
or2 = vOR3(or2_2, or2_2, or2_1);
aa0 = vec_left1_bin(or1,or0);
cc0 = vec_right1_bin_unused_col(or0, _mm_bitshift_left(or0, 127-nb_unused_col),nb_unused_col);
and0 = vAND3(aa0, or0, cc0);
aa0 = vec_left1_bin(or2,or1);
cc0 = vec_right1_bin(or1,or0);
and0_bis = vAND3(aa0, or1, cc0);
y = vAND3(and0, and1, and2);
vec_store(&m[height-2][nb_vbits_col-1], y);
y = vAND3(and0_bis, and1_bis, and2_bis);
vec_store(&m[height-2][nb_vbits_col-2], y);
y = vAND3(and2, and0, and0);
vec_store(&m[height-1][nb_vbits_col-1], y);
y = vAND3(and2_bis, and0_bis, and0_bis);
vec_store(&m[height-1][nb_vbits_col-2], y);
break;
default:
break;
}
_mm_free(img_bin_extra_lines);
return m;
}
// ero - dil - dil - ero
// fermeture(ouverture)
// Full morphological chain on a packed binary image: closing of the opening
// (erosion - dilation - dilation - erosion), using the "opti" SIMD variants.
// Caller owns the returned matrix and must release it with free_vbitsmatrix.
vbits ** chaine_complete_opti_SIMD(vbits** img_bin, int height, int width)
{
    vbits ** opened = ouverture_opti_SIMD(img_bin, height, width);
    vbits ** closed = fermeture_opti_SIMD(opened, height, width);
    free_vbitsmatrix(opened, height, width);
#ifdef BENCH
    // NOTE(review): under BENCH the result is freed here and the (now
    // dangling) pointer is still returned — presumably the benchmark
    // driver only times the call and ignores the return value; confirm.
    free_vbitsmatrix(closed, height, width);
#endif
    return closed;
}
// Same morphological chain (closing of the opening) built from the
// operator-fused SIMD variants. Caller owns the returned matrix and must
// release it with free_vbitsmatrix.
vbits ** chaine_complete_fusion_SIMD(vbits** img_bin, int height, int width)
{
    vbits ** opened = ouverture_fusion_SIMD(img_bin, height, width);
    vbits ** closed = fermeture_fusion_SIMD(opened, height, width);
    free_vbitsmatrix(opened, height, width);
#ifdef BENCH
    // NOTE(review): as in the "opti" variant, BENCH frees the result before
    // returning it — the returned pointer is dangling in that build; confirm
    // the benchmark caller never dereferences it.
    free_vbitsmatrix(closed, height, width);
#endif
    return closed;
}
|
GB_unaryop__identity_bool_uint32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_bool_uint32
// op(A') function: GB_tran__identity_bool_uint32
// C type: bool
// A type: uint32_t
// cast: bool cij = (bool) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, aij) \
bool z = (bool) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity operator with a uint32_t -> bool typecast:
// Cx [p] = (bool) Ax [p] for all p in 0..anz-1 (see GB_CAST_OP above).
GrB_Info GB_unop__identity_bool_uint32
(
bool *Cx, // Cx and Ax may be aliased
uint32_t *Ax, // input array of size anz, not modified
int64_t anz, // number of entries to apply the operator to
int nthreads // number of OpenMP threads to use
)
{
#if GB_DISABLE
// this operator/type combination was compiled out (see GB_control.h);
// the caller falls back to the generic worker instead
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// each entry is independent, so a static schedule splits the work evenly
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// Cx [p] = (bool) Ax [p], via the GB_GETA / GB_CASTING / GB_OP macros
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose A, typecast uint32_t -> bool, and apply the identity operator.
// The loop itself lives in the shared template GB_unaryop_transpose.c, which
// is expanded here with the GB_* macros defined earlier in this file.
GrB_Info GB_tran__identity_bool_uint32
(
GrB_Matrix C, // output matrix
const GrB_Matrix A, // input matrix
int64_t *GB_RESTRICT *Rowcounts, // per-slice row counts (see template)
GBI_single_iterator Iter, // iterator over A
const int64_t *GB_RESTRICT A_slice, // how A is partitioned across slices
int naslice // number of slices of A
)
{
#if GB_DISABLE
// this operator/type combination was compiled out (see GB_control.h)
return (GrB_NO_VALUE) ;
#else
// phase 2 of 2: the template fills C with the transposed, typecast values
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.