source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
outer_mult.h | #include "CSC.h"
#include "CSR.h"
#include "Triple.h"
#include "radix_sort/radix_sort.hpp"
#include "utility.h"
#include <map>
#include <algorithm>
#include <iostream>
#include <omp.h>
#include <unistd.h>
#include <cstring>
//#include <numa.h>
#include <thread>
#include <cstdlib>
#include <chrono>
#include<set>
using namespace std;
static uint32_t nrows_of_A;
static uint32_t nrows_per_blocker;
// static int *rows_to_blockers;
// static int *flops_by_row_blockers;
#define TBB true
#define SIZE 16
// Cheap modulo: skip the (comparatively expensive) % instruction whenever
// the input is already inside [0, ceil). Result is narrowed to uint16_t,
// which callers use for blocker ids.
template <typename IT>
uint16_t fast_mod(const IT input, const int ceil) {
    if (input < ceil)
        return input;
    return input % ceil;
}
// Count the scalar multiplications ("flops") needed to form A*B by outer
// products: for each shared index i, every nonzero of column i of A pairs
// with every nonzero of row i of B.
// Fix: the per-i product is widened to 64 bits BEFORE multiplying; the
// original multiplied colnnz * rownnz in IT, which can overflow for 32-bit
// index types on large inputs even though the accumulator is uint64_t.
template <typename IT, typename NT>
uint64_t getFlop(const CSC<IT, NT>& A, const CSR<IT, NT>& B)
{
    uint64_t flop = 0;
#pragma omp parallel for reduction(+ : flop)
    for (IT i = 0; i < A.cols; ++i)
    {
        const uint64_t colnnz = A.colptr[i + 1] - A.colptr[i];
        const uint64_t rownnz = B.rowptr[i + 1] - B.rowptr[i];
        flop += colnnz * rownnz;
    }
    return flop;
}
// Symbolic phase with STATIC row blocking: for columns [startIdx, endIdx) of
// A, count how many partial products ("flops") land in each row blocker.
// A nonzero A(r, i) contributes nnz(row i of B) flops to blocker
// r / nrows_per_blocker. On return, total_flops has the sum over all
// blockers added to it.
// Note: the nrows_per_blocker parameter shadows the file-scope static of the
// same name.
// NOTE(review): the array-section reduction adds into the caller's buffer,
// so flops_by_row_blockers is presumably expected to arrive zero-initialized
// (it comes from my_malloc at the call site) — confirm my_malloc zeroes.
template <typename IT, typename NT>
void do_static_symbolic(const CSC<IT, NT>& A, const CSR<IT, NT>& B, IT startIdx, IT endIdx,
    uint32_t nrows_per_blocker, uint16_t num_blockers,
    IT *flops_by_row_blockers, IT& total_flops)
{
#pragma omp parallel for reduction(+ : flops_by_row_blockers[:num_blockers])
    for (IT i = startIdx; i < endIdx; ++i)
    {
        // Every nonzero of column i of A multiplies all nnz of row i of B.
        IT rownnz = B.rowptr[i + 1] - B.rowptr[i];
        for (IT j = A.colptr[i]; j < A.colptr[i + 1]; ++j)
        {
            // Destination row decides which blocker receives the work.
            uint16_t row_blocker_id = A.rowids[j] / nrows_per_blocker;
            // cout << "row_blocker_id " << row_blocker_id << endl;
            flops_by_row_blockers[row_blocker_id] += rownnz;
        }
    }
    // Serial grand total across blockers (num_blockers is small).
    for (IT i = 0; i < num_blockers; ++i)
    {
        total_flops += flops_by_row_blockers[i];
    }
}
// Alternative (load-balanced) symbolic phase: tally flops per OUTPUT ROW,
// then greedily assign consecutive rows to blockers so each blocker receives
// roughly total_flops / num_blockers worth of work.
// Outputs:
//   flops_by_rows[r]         - flops landing in output row r (added into;
//                              presumably arrives zeroed — confirm my_malloc)
//   rows_to_blockers[r]      - blocker id assigned to row r
//   flops_by_row_blockers[b] - cumulative volume of blocker b as of the last
//                              row assigned to it
//   total_flops              - grand total, added to the caller's value
// NOTE(review): cur_blocker_id is incremented without an upper bound; if the
// running volume crosses avg_volumn more than num_blockers times (rounding),
// flops_by_row_blockers[cur_blocker_id] writes past the end of the array —
// confirm callers guard this (OuterSpGEMM_stage uses do_static_symbolic
// instead in the current code).
template <typename IT, typename NT>
void do_symbolic(const CSC<IT, NT>& A, const CSR<IT, NT>& B, IT startIdx, IT endIdx,
    uint16_t num_blockers, IT* flops_by_rows, IT* rows_to_blockers,
    IT* flops_by_row_blockers, IT& total_flops)
{
    double avg_volumn = 0.0;
    double cur_volumn = 0.0;
    uint16_t cur_blocker_id = 0;
    // #pragma omp parallel
    {
        // Per-row flop counts: each nonzero A(r, i) adds nnz(row i of B) to row r.
        // #pragma omp for reduction(+ : flops_by_rows[:A.rows])
        for (IT i = startIdx; i < endIdx; ++i)
        {
            IT rownnz = B.rowptr[i + 1] - B.rowptr[i];
            for (IT j = A.colptr[i]; j < A.colptr[i + 1]; ++j)
            {
                flops_by_rows[A.rowids[j]] += rownnz;
            }
        }
#pragma omp parallel for reduction(+ : total_flops)
        for (IT i = 0; i < A.rows; ++i)
            total_flops += flops_by_rows[i];
    }
    // Target volume per blocker (integer result of IT / uint16_t division,
    // stored in a double).
    avg_volumn = total_flops / num_blockers;
    rows_to_blockers[0] = 0;
    // Greedy sweep: keep adding rows to the current blocker until its volume
    // exceeds the average, then open the next blocker.
    for (IT i = 0; i < A.rows; ++i)
    {
        cur_volumn += flops_by_rows[i];
        flops_by_row_blockers[cur_blocker_id] = cur_volumn;
        rows_to_blockers[i] = cur_blocker_id;
        if (cur_volumn > avg_volumn)
        {
            cur_blocker_id ++;
            cur_volumn = 0;
        }
    }
    // for (uint16_t i = 0 ; i< num_blockers; ++i)
    //     cout << "BlockerId = " << i << " RowId = " << rows_to_blockers[i] << endl;
}
// Upper bound on the intermediate memory for A*B: one 8-byte slot for every
// multiply result produced by the outer-product expansion.
template <typename IT, typename NT>
int64_t getReqMemory(const CSC<IT, NT>& A, const CSR<IT, NT>& B)
{
    const uint64_t flop = getFlop(A, B);
    return static_cast<int64_t>(flop * sizeof(int64_t));
}
struct ExtractKey
{
    // Pack (row, col) into one 64-bit sort key: row in the high 32 bits,
    // col (reinterpreted as unsigned) in the low 32 bits. The value
    // component of the triple is ignored.
    inline int64_t operator()(std::tuple<int32_t, int32_t, double> tup)
    {
        const int64_t hi = static_cast<int64_t>(std::get<0>(tup)) << 32;
        const int64_t lo = static_cast<int64_t>(static_cast<uint32_t>(std::get<1>(tup)));
        return hi | lo;
    }
};
struct ExtractKey2
{
inline uint32_t operator()(tuple<int32_t, int32_t, double> tup)
{
// 32768 for S23
// 256 for S16
return ((std::get<0>(tup) % nrows_per_blocker) << 20 | (uint32_t) std::get<1>(tup));
// return (std::get<0>(tup) << 16) | ((uint32_t) std::get<1>(tup));
// return ((std::get<0>(tup) % flops_by_row_blockers[rows_to_blockers[std::get<0>(tup)]] << 16) | ((uint32_t) std::get<1>(tup)));
// return (((rows_to_blockers[std::get<0>(tup)] % (flops_by_row_blockers[std::get<0>(tup)])) << 20) | ((uint32_t) std::get<1>(tup)));
}
};
// Lexicographic "less" on the (row, col) coordinates of two triples; the
// value component is ignored. Suitable as a strict-weak-order comparator
// for std::sort.
// Fix: take const references — the original copied both tuples on every
// comparison — and collapse the two-branch chain into one guard.
template <typename IT, typename NT>
bool compareTuple (const std::tuple<IT, IT, NT>& t1, const std::tuple<IT, IT, NT>& t2)
{
    if (std::get<0>(t1) != std::get<0>(t2))
        return std::get<0>(t1) < std::get<0>(t2);
    return std::get<1>(t1) < std::get<1>(t2);
}
// Two triples match when both coordinates (row, col) agree; the value
// component is deliberately ignored. Column is compared first, mirroring
// the sort-key layout.
template <typename IT, typename NT>
inline bool isTupleEqual (std::tuple<IT, IT, NT> t1, std::tuple<IT, IT, NT> t2)
{
    return std::get<1>(t1) == std::get<1>(t2)
        && std::get<0>(t1) == std::get<0>(t2);
}
// Sort a run of (row, col, val) triples with the out-of-place radix sort,
// keyed by ExtractKey2 (row-within-blocker in the high bits, column id in
// the low 20 bits). `buffer` is scratch space for radix_sort; presumably it
// must be at least as large as [begin, end) — confirm against the
// radix_sort implementation. Since the key keeps only 20 bits of column id,
// this presumably requires ncols(B) < 2^20 — TODO confirm.
template <typename IT, typename NT>
inline void doRadixSort(tuple<IT, IT, NT>* begin, tuple<IT, IT, NT>* end, tuple<IT, IT, NT>* buffer)
{
    radix_sort(begin, end, buffer, ExtractKey2());
    // sort(begin, end, compareTuple<IT, NT>);
}
// Compact a key-sorted run of (row, col, val) triples in place: consecutive
// entries with equal (row, col) coordinates are fused by summing their
// values. Returns the number of surviving entries (0 for an empty run).
// Fixes: drops the unused local `ExtractKey op`, the redundant re-check of
// `j < length` inside the loop body, and the element-wise get<> copies in
// favor of whole-tuple assignment.
template <typename IT, typename NT>
inline IT doMerge(std::tuple<IT, IT, NT>* vec, IT length)
{
    if (length == 0) return 0;
    IT out = 0;                      // index of the last compacted entry
    for (IT in = 1; in < length; ++in)
    {
        if (std::get<1>(vec[out]) == std::get<1>(vec[in]) &&
            std::get<0>(vec[out]) == std::get<0>(vec[in]))
        {
            // Duplicate coordinate: accumulate the numeric value.
            std::get<2>(vec[out]) += std::get<2>(vec[in]);
        }
        else
        {
            // New coordinate: slide it forward to the compacted prefix.
            vec[++out] = vec[in];
        }
    }
    return out + 1;
}
// Convert per-blocker element counts into [begin, end) offsets via an
// exclusive prefix sum. Every blocker's end pointer starts equal to its
// begin pointer; it is advanced later as elements are appended.
template <typename IT>
void initializeBlockerBoundary(IT* nums_per_col_blocker, uint16_t num_blockers, IT* blocker_begin_ptr,
    IT* blocker_end_ptr)
{
    IT offset = 0;
    blocker_begin_ptr[0] = offset;
    blocker_end_ptr[0] = offset;
    for (uint16_t b = 1; b < num_blockers; ++b)
    {
        offset += nums_per_col_blocker[b - 1];
        blocker_begin_ptr[b] = offset;
        blocker_end_ptr[b] = offset;
    }
}
// One stage of the outer-product SpGEMM C = A * B restricted to shared
// indices [startIdx, endIdx): columns of A (CSC) paired with rows of B (CSR).
//
// Pipeline:
//   1. do_static_symbolic counts the flops landing in each "row blocker"
//      (a band of nrows_per_blocker consecutive output rows).
//   2. Each thread expands outer products into small per-blocker staging
//      buffers (block_width triples each) and flushes full blocks to the
//      shared per-blocker arrays with an atomic fetch-and-add cursor.
//   3. Each blocker is radix-sorted by (row-in-blocker, col), duplicate
//      coordinates are merged, and the result is copied into C's CSR arrays.
//
// `partition` is accepted for interface compatibility but not used here.
//
// Fixes vs. the original: the unused flops_by_rows[A.rows] allocation is
// removed, and global_blocker_counters, each sorting_buffer[t], and the
// sorting_buffer array — all previously leaked — are now freed.
//
// NOTE(review): SIZE (16) is used as sizeof(TripleNode); that holds for
// 32-bit IT with double NT but not for wider IT — confirm upstream.
// NOTE(review): nnz_by_row, global_blocker_counters and the per-thread
// local_blocker_counters are used as if zero-initialized; this relies on
// my_malloc returning zeroed memory — confirm.
template <typename IT, typename NT>
void OuterSpGEMM_stage(const CSC<IT, NT>& A, const CSR<IT, NT>& B, IT startIdx, IT endIdx, CSR<IT, NT>& C,
    int nblockers, int nblockchars, int partition)
{
    typedef tuple<IT, IT, NT> TripleNode;
    const uint16_t nthreads = omp_get_max_threads();
    uint16_t num_blockers = nblockers;
    const uint16_t block_width = nblockchars;
    nrows_of_A = A.rows;
    // At least 64 rows per blocker so tiny matrices do not over-partition.
    nrows_per_blocker = A.rows <= num_blockers * 64 ? 64 : (A.rows + num_blockers - 1) / num_blockers;
    IT total_flop = 0;
    IT *flops_by_row_blockers = my_malloc<IT>(num_blockers);
    IT *nnz_by_row = my_malloc<IT>(A.rows);
    IT *global_blocker_counters = my_malloc<IT>(num_blockers);
    TripleNode **global_blockers = my_malloc<TripleNode*>(num_blockers);
    IT **local_blocker_counters = my_malloc<IT*>(nthreads);
    TripleNode **local_blockers = my_malloc<TripleNode*>(nthreads);
    TripleNode **sorting_buffer = my_malloc<TripleNode*>(nthreads);
    IT *nnz_per_row_blocker = my_malloc<IT>(num_blockers);
    do_static_symbolic(A, B, 0, A.cols, nrows_per_blocker, num_blockers, flops_by_row_blockers, total_flop);
    // Worst case every flop survives merging, so size each shared blocker
    // array for its full flop count.
    for (uint16_t blocker_id = 0; blocker_id < num_blockers; ++blocker_id)
        global_blockers[blocker_id] = static_cast<TripleNode*>(::operator new(SIZE * flops_by_row_blockers[blocker_id]));
    // Radix-sort scratch must hold the largest blocker.
    IT max_flops_in_row_blockers = *std::max_element(flops_by_row_blockers, flops_by_row_blockers + num_blockers);
#pragma omp parallel
    {
        uint16_t thread_id = omp_get_thread_num();
        TripleNode *begin_local_blockers, *cur_local_blockers, *end_local_blockers;
        local_blockers[thread_id] = static_cast<TripleNode*>(::operator new(SIZE * num_blockers * block_width));
        local_blocker_counters[thread_id] = my_malloc<IT>(num_blockers);
        sorting_buffer[thread_id] = static_cast<TripleNode*>(::operator new(SIZE * max_flops_in_row_blockers));
        // ---- computing phase: expand outer products into staging buffers.
#pragma omp for nowait
        for (IT idx = startIdx; idx < endIdx; ++idx)
        {
            for (IT j = A.colptr[idx]; j < A.colptr[idx + 1]; ++j)
            {
                IT rowid = A.rowids[j];
                uint16_t row_blocker_id = rowid / nrows_per_blocker;
                begin_local_blockers = local_blockers[thread_id] + row_blocker_id * block_width;
                cur_local_blockers = begin_local_blockers + local_blocker_counters[thread_id][row_blocker_id];
                end_local_blockers = begin_local_blockers + block_width;
                for (IT k = B.rowptr[idx]; k < B.rowptr[idx + 1]; ++k)
                {
                    std::get<0>(*cur_local_blockers) = A.rowids[j];
                    std::get<1>(*cur_local_blockers) = B.colids[k];
                    std::get<2>(*cur_local_blockers) = A.values[j] * B.values[k];
                    cur_local_blockers++;
                    // Staging block full: bulk-flush it into the shared
                    // array; the fetch-and-add reserves a disjoint range.
                    if (cur_local_blockers == end_local_blockers)
                    {
                        std::memcpy(
                            global_blockers[row_blocker_id] + __sync_fetch_and_add(&global_blocker_counters[row_blocker_id], block_width),
                            begin_local_blockers,
                            block_width * SIZE
                        );
                        cur_local_blockers = begin_local_blockers;
                    }
                }
                local_blocker_counters[thread_id][row_blocker_id] = cur_local_blockers - begin_local_blockers;
            }
        }
        // Flush whatever remains in this thread's staging buffers.
        for (uint16_t row_blocker_id = 0; row_blocker_id < num_blockers; row_blocker_id++)
        {
            std::memcpy(
                global_blockers[row_blocker_id] + __sync_fetch_and_add(&global_blocker_counters[row_blocker_id], local_blocker_counters[thread_id][row_blocker_id]),
                local_blockers[thread_id] + row_blocker_id * block_width,
                local_blocker_counters[thread_id][row_blocker_id] * SIZE
            );
        }
    }
    // ---- sort + merge phase: one blocker per loop iteration.
#pragma omp parallel
    {
        uint16_t thread_id = omp_get_thread_num();
#pragma omp for reduction(+ : nnz_per_row_blocker[:num_blockers])
        for (uint16_t row_blocker_id = 0; row_blocker_id < num_blockers; ++row_blocker_id)
        {
            doRadixSort(global_blockers[row_blocker_id],
                global_blockers[row_blocker_id] + global_blocker_counters[row_blocker_id],
                sorting_buffer[thread_id]);
            IT after = doMerge(global_blockers[row_blocker_id], global_blocker_counters[row_blocker_id]);
            nnz_per_row_blocker[row_blocker_id] += after;
        }
    }
    // Prefix-sum the per-blocker nnz to get each blocker's base offset in C.
    IT *cumulative_row_indices = my_malloc<IT>(num_blockers + 1);
    scan(nnz_per_row_blocker, cumulative_row_indices, (IT)(num_blockers) + 1);
    IT total_nnz = cumulative_row_indices[num_blockers];
    // NOTE(review): this guard looks inverted (make_empty only when already
    // empty); preserved as-is — confirm CSR::isEmpty/make_empty semantics.
    if (C.isEmpty())
    {
        C.make_empty();
    }
    C.rows = A.rows;
    C.cols = B.cols;
    C.colids = static_cast<IT*>(::operator new(sizeof(IT[total_nnz])));
    C.rowptr = static_cast<IT*>(::operator new(sizeof(IT[C.rows+1])));
    C.values = static_cast<NT*>(::operator new(sizeof(NT[total_nnz])));
    C.rowptr[0] = 0;
    // ---- copy phase: merged triples into C's arrays, tallying nnz per
    // output row for the final rowptr scan.
#pragma omp parallel for
    for (uint16_t row_blocker_id = 0; row_blocker_id < num_blockers; ++row_blocker_id)
    {
        IT base = cumulative_row_indices[row_blocker_id];
        TripleNode* this_blocker = global_blockers[row_blocker_id];
        for (IT i = 0; i < nnz_per_row_blocker[row_blocker_id]; ++i)
        {
            ++nnz_by_row[std::get<0>(this_blocker[i])];
            C.colids[base + i] = std::get<1>(this_blocker[i]);
            C.values[base + i] = std::get<2>(this_blocker[i]);
        }
    }
    scan(nnz_by_row, C.rowptr, C.rows + 1);
    C.nnz = total_nnz;
    // ---- cleanup. Mirrors the original's my_free-on-operator-new pattern;
    // also releases the buffers the original leaked.
    my_free<IT>(flops_by_row_blockers);
    my_free<IT>(nnz_by_row);
    my_free<IT>(nnz_per_row_blocker);
    my_free<IT>(cumulative_row_indices);
    my_free<IT>(global_blocker_counters);
    for (uint16_t row_blocker_id = 0; row_blocker_id < num_blockers; ++row_blocker_id)
    {
        my_free<TripleNode>(global_blockers[row_blocker_id]);
    }
    my_free<TripleNode*>(global_blockers);
    for (uint16_t thread_id = 0; thread_id < nthreads; ++thread_id)
    {
        // Assumes the parallel regions ran with all nthreads threads (the
        // original makes the same assumption for local_blockers).
        my_free<TripleNode>(local_blockers[thread_id]);
        my_free<TripleNode>(sorting_buffer[thread_id]);
        my_free<IT>(local_blocker_counters[thread_id]);
    }
    my_free<TripleNode*>(local_blockers);
    my_free<TripleNode*>(sorting_buffer);
    my_free<IT*>(local_blocker_counters);
}
// Public entry point: C = A * B over all shared indices, using the staged
// outer-product kernel. nblockers sizes the row-blocker partition and
// nblockchars the per-thread staging width; `partition` is forwarded
// untouched (the NUMA pinning code that used it is commented out below).
template <typename IT, typename NT>
void OuterSpGEMM(const CSC<IT, NT>& A, const CSR<IT, NT>& B, CSR<IT, NT>& C, int nblockers, int nblockchars, int partition)
{
    // unsigned nodes = numa_num_configured_nodes();
    // unsigned cores = numa_num_configured_cpus();
    // unsigned cores_per_node = cores / nodes;
    // omp_set_num_threads(cores_per_node);
    // cout << "Printing" << endl;
    // #pragma omp parallel
    // {
    //     #pragma omp critical
    //     cout << "executing: cpu_id-> " << sched_getcpu() << "(" << omp_get_num_threads() << ") numa id-> " << numa_node_of_cpu(sched_getcpu()) << " partition -> " << partition << endl;
    // }
    OuterSpGEMM_stage(A, B, 0, A.cols, C, nblockers, nblockchars, partition);
}
|
column_matrix.h | /*!
* Copyright 2017 by Contributors
* \file column_matrix.h
* \brief Utility for fast column-wise access
* \author Philip Cho
*/
#ifndef TSOOBGX_COMMON_COLUMN_MATRIX_H_
#define TSOOBGX_COMMON_COLUMN_MATRIX_H_
#include <limits>
#include <vector>
#include "hist_util.h"
namespace tsoobgx {
namespace common {
/*! \brief column type */
/*! \brief column type */
enum ColumnType {
  kDenseColumn,
  kSparseColumn
};

/*! \brief Read-only view over one feature column, to be used with
    ApplySplit. Bin ids are stored relative to the feature, so the global
    bin id of entry i is index[i] + index_base. */
class Column {
 public:
  Column(ColumnType type, const uint32_t* index, uint32_t index_base,
         const size_t* row_ind, size_t len)
      : type_(type),
        index_(index),
        index_base_(index_base),
        row_ind_(row_ind),
        len_(len) {}

  // Number of stored entries (one per row for dense columns).
  size_t Size() const { return len_; }

  // Bin id within the whole cut matrix:
  // GetFeatureBinIdx(i) + GetBaseIdx() == GetGlobalBinIdx(i).
  uint32_t GetGlobalBinIdx(size_t idx) const {
    return index_base_ + index_[idx];
  }

  // Bin id relative to this feature.
  uint32_t GetFeatureBinIdx(size_t idx) const { return index_[idx]; }

  uint32_t GetBaseIdx() const { return index_base_; }

  ColumnType GetType() const { return type_; }

  // Row that entry idx belongs to: dense columns hold one entry per row so
  // the entry index IS the row index; sparse columns consult the side array.
  size_t GetRowIdx(size_t idx) const {
    if (type_ == ColumnType::kDenseColumn) {
      return idx;
    }
    // row_ind_ may be null for dense columns, but that branch never reads it.
    return row_ind_[idx];  // NOLINT
  }

  // Dense columns flag absent rows with the largest uint32 value.
  bool IsMissing(size_t idx) const {
    return index_[idx] == std::numeric_limits<uint32_t>::max();
  }

  const size_t* GetRowData() const { return row_ind_; }

 private:
  ColumnType type_;
  const uint32_t* index_;
  uint32_t index_base_;
  const size_t* row_ind_;
  const size_t len_;
};
/*! \brief a collection of columns, with support for construction from
GHistIndexMatrix. */
class ColumnMatrix {
 public:
  // get number of features
  inline bst_uint GetNumFeature() const {
    return static_cast<bst_uint>(type_.size());
  }

  // construct column matrix from GHistIndexMatrix
  // Builds a feature-major (column-wise) copy of the row-major quantized
  // matrix. Features with fewer than sparse_threshold * nrow entries are
  // stored sparsely (bin ids + row ids); the rest densely (one slot per
  // row, missing slots flagged with uint32 max).
  inline void Init(const GHistIndexMatrix& gmat,
                   double sparse_threshold) {
    const int32_t nfeature = static_cast<int32_t>(gmat.cut.row_ptr.size() - 1);
    const size_t nrow = gmat.row_ptr.size() - 1;
    // identify type of each column
    feature_counts_.resize(nfeature);
    type_.resize(nfeature);
    std::fill(feature_counts_.begin(), feature_counts_.end(), 0);
    // Guard: per-feature bin ids are stored relative to the feature in
    // uint32, so no feature may own more bins than uint32 can represent.
    uint32_t max_val = std::numeric_limits<uint32_t>::max();
    for (bst_uint fid = 0; fid < nfeature; ++fid) {
      CHECK_LE(gmat.cut.row_ptr[fid + 1] - gmat.cut.row_ptr[fid], max_val);
    }
    gmat.GetFeatureCounts(&feature_counts_[0]);
    // classify features
    for (int32_t fid = 0; fid < nfeature; ++fid) {
      if (static_cast<double>(feature_counts_[fid])
                 < sparse_threshold * nrow) {
        type_[fid] = kSparseColumn;
      } else {
        type_[fid] = kDenseColumn;
      }
    }
    // want to compute storage boundary for each feature
    // using variants of prefix sum scan
    // Dense features reserve nrow slots; sparse ones only their entry count.
    boundary_.resize(nfeature);
    size_t accum_index_ = 0;
    size_t accum_row_ind_ = 0;
    for (int32_t fid = 0; fid < nfeature; ++fid) {
      boundary_[fid].index_begin = accum_index_;
      boundary_[fid].row_ind_begin = accum_row_ind_;
      if (type_[fid] == kDenseColumn) {
        accum_index_ += static_cast<size_t>(nrow);
        accum_row_ind_ += static_cast<size_t>(nrow);
      } else {
        accum_index_ += feature_counts_[fid];
        accum_row_ind_ += feature_counts_[fid];
      }
      boundary_[fid].index_end = accum_index_;
      boundary_[fid].row_ind_end = accum_row_ind_;
    }

    index_.resize(boundary_[nfeature - 1].index_end);
    row_ind_.resize(boundary_[nfeature - 1].row_ind_end);

    // store least bin id for each feature
    index_base_.resize(nfeature);
    for (bst_uint fid = 0; fid < nfeature; ++fid) {
      index_base_[fid] = gmat.cut.row_ptr[fid];
    }

    // pre-fill index_ for dense columns
    #pragma omp parallel for
    for (int32_t fid = 0; fid < nfeature; ++fid) {
      if (type_[fid] == kDenseColumn) {
        const size_t ibegin = boundary_[fid].index_begin;
        uint32_t* begin = &index_[ibegin];
        uint32_t* end = begin + nrow;
        std::fill(begin, end, std::numeric_limits<uint32_t>::max());
        // max() indicates missing values
      }
    }

    // loop over all rows and fill column entries
    // num_nonzeros[fid] = how many nonzeros have this feature accumulated so far?
    std::vector<size_t> num_nonzeros;
    num_nonzeros.resize(nfeature);
    std::fill(num_nonzeros.begin(), num_nonzeros.end(), 0);
    // Serial pass (sparse columns append, so row order must be preserved to
    // keep row_ind_ sorted within each feature).
    for (size_t rid = 0; rid < nrow; ++rid) {
      const size_t ibegin = gmat.row_ptr[rid];
      const size_t iend = gmat.row_ptr[rid + 1];
      size_t fid = 0;
      for (size_t i = ibegin; i < iend; ++i) {
        const uint32_t bin_id = gmat.index[i];
        // Bin ids within a row ascend with the feature id, so advancing fid
        // monotonically locates the owning feature of each bin.
        while (bin_id >= gmat.cut.row_ptr[fid + 1]) {
          ++fid;
        }
        if (type_[fid] == kDenseColumn) {
          uint32_t* begin = &index_[boundary_[fid].index_begin];
          begin[rid] = bin_id - index_base_[fid];
        } else {
          uint32_t* begin = &index_[boundary_[fid].index_begin];
          begin[num_nonzeros[fid]] = bin_id - index_base_[fid];
          row_ind_[boundary_[fid].row_ind_begin + num_nonzeros[fid]] = rid;
          ++num_nonzeros[fid];
        }
      }
    }
  }

  /* Fetch an individual column. This code should be used with TSOOBGX_TYPE_SWITCH
     to determine type of bin id's */
  inline Column GetColumn(unsigned fid) const {
    Column c(type_[fid], &index_[boundary_[fid].index_begin], index_base_[fid],
             (type_[fid] == ColumnType::kSparseColumn ?
              &row_ind_[boundary_[fid].row_ind_begin] : nullptr),
             boundary_[fid].index_end - boundary_[fid].index_begin);
    return c;
  }

 private:
  struct ColumnBoundary {
    // indicate where each column's index and row_ind is stored.
    // index_begin and index_end are logical offsets, so they should be converted to
    // actual offsets by scaling with packing_factor_
    size_t index_begin;
    size_t index_end;
    size_t row_ind_begin;
    size_t row_ind_end;
  };

  // feature_counts_[fid]: number of entries feature fid has across all rows.
  std::vector<size_t> feature_counts_;
  std::vector<ColumnType> type_;
  SimpleArray<uint32_t> index_;  // index_: may store smaller integers; needs padding
  SimpleArray<size_t> row_ind_;
  std::vector<ColumnBoundary> boundary_;

  // index_base_[fid]: least bin id for feature fid
  std::vector<uint32_t> index_base_;
};
} // namespace common
} // namespace tsoobgx
#endif // TSOOBGX_COMMON_COLUMN_MATRIX_H_
|
Searching.202007270051.subsearch.profile.h | //
// Created by Zhen Peng on 7/27/2020.
//
#ifndef BATCH_SEARCHING_SEARCHING_H
#define BATCH_SEARCHING_SEARCHING_H
#include <vector>
#include <boost/dynamic_bitset.hpp>
//#include <boost/sort/sort.hpp>
#include <iostream>
#include <fstream>
#include <unordered_map>
#include <immintrin.h>
#include <cstring>
#include <unordered_set>
#include <set>
#include <cfloat>
#include <algorithm>
//#include <omp.h>
#include "../include/definitions.h"
//#include "../include/efanna2e/neighbor.h"
#include "../include/utils.h"
#include "../include/Candidate.h"
#include "../include/parallelization.h"
#include "../include/bitvector.h"
namespace PANNS {
class Searching {
//private:
public:
idi num_v_ = 0;
edgei num_e_ = 0;
idi num_queries_ = 0;
uint64_t dimension_ = 0;
idi width_ = 0; // NSG largest degree
idi ep_ = 0; // Start point
// std::vector<dataf> data_load_;
// std::vector<dataf> queries_load_;
// std::vector< std::vector<dataf> > data_load_;
// std::vector< std::vector<dataf> > queries_load_;
// std::vector<distf> norms_;
dataf *data_load_ = nullptr;
dataf *queries_load_ = nullptr;
// dataf *norms_;
// std::vector< std::vector<idi> > nsg_graph_;
// idi *nsg_graph_indices_;
// idi *nsg_graph_out_edges_;
// std::vector< std::vector<idi> > edge_list_;
char *opt_nsg_graph_ = nullptr;
uint64_t data_bytes_;
uint64_t neighbor_bytes_;
uint64_t vertex_bytes_;
// For multithreads
int num_threads_ = 1;
// int num_real_threads_ = 1;
// int num_threads_intra_query_ = 1;
// int num_threads_inter_query_ = 1;
dataf compute_norm(
const dataf *data) const;
// idi vertex_id);
// const std::vector<PANNS::dataf> &data);
// size_t loc_start,
// idi dimension)
dataf compute_distance_with_norm(
const dataf *v_data,
const dataf *q_data,
// idi vertex_id,
// idi query_id,
// const std::vector<dataf> &d_data,
// const std::vector<dataf> &q_data,
// PANNS::idi d_start,
// PANNS::idi q_start,
const dataf vertex_norm) const;
// idi dimension)
static idi insert_into_queue(
std::vector<Candidate> &c_queue,
idi c_queue_top,
Candidate cand);
static idi add_into_queue(
std::vector<PANNS::Candidate> &queue,
idi &queue_top,
const idi queue_size,
const PANNS::Candidate &cand);
static idi add_into_queue(
std::vector<PANNS::Candidate> &queue,
const idi queue_start,
idi &queue_size,
const idi queue_capacity,
const PANNS::Candidate &cand);
static void add_into_queue_at(
const Candidate &cand,
std::vector<Candidate> &queue,
const idi insert_index, // The insertion location, independent with queue_start
const idi queue_start,
idi &queue_top, // The number of elements in queue, independent with queue_start
const idi queue_size); // The maximum capacity of queue, independent with queue_start.
static void insert_one_element_at(
// const T &cand,
// T *queue_base,
const Candidate &cand,
std::vector<Candidate> &queue_base,
const idi insert_index,
const idi queue_start,
const idi queue_size);
// idi insert_into_queue_nsg(
// std::vector< Candidate > &c_queue,
// idi c_queue_top,
// Candidate cand);
static idi merge_two_queues_into_1st_queue_seq_fixed(
std::vector<Candidate> &queue1,
const idi queue1_start,
const idi queue1_size,
std::vector<Candidate> &queue2,
const idi queue2_start,
const idi queue2_size);
static void merge_two_queues_into_1st_queue_seq_incr(
std::vector<Candidate> &queue1,
const idi queue1_start,
idi &queue1_size, // The number of element in queue1, independent with queue1_start.
const idi queue1_length, // The maximum capacity of queue1, independent with queue1_start.
std::vector<Candidate> &queue2,
const idi queue2_start,
const idi queue2_size);
idi merge_all_queues_para_list(
std::vector< std::vector<Candidate> > &local_queues_list,
std::vector<idi> &local_queues_ends,
std::vector<Candidate> &set_L,
const idi L);
// idi merge_all_queues_para_array(
//// std::vector< std::vector<Candidate> > &local_queues_list,
// std::vector<Candidate> &local_queues_array,
// std::vector<idi> &local_queues_ends,
// const idi local_queue_length,
// std::vector<Candidate> &set_L,
// const idi L);
idi merge_all_queues_para_array(
std::vector<Candidate> &set_L,
// std::vector<Candidate> &local_queues_array,
std::vector<idi> &local_queues_ends,
const idi local_queue_length,
// std::vector<Candidate> &set_L,
const idi L);
idi merge_all_queues_queue_base(
// std::vector< std::vector<Candidate> > &local_queues_list,
std::vector<Candidate> &set_L,
// std::vector<Candidate> &local_queues_array,
std::vector<idi> &local_queues_ends,
const idi queue_base,
const int real_threads,
const idi local_queue_length,
// std::vector<Candidate> &set_L,
const idi L);
void merge_two_consecutive_queues_in_place(
std::vector<Candidate> &two_queues,
const idi base_1,
// const idi &end_1,
const idi base_2,
const idi &length_2);
void merge_in_set_L(
std::vector<Candidate> &set_L,
const idi set_L_length,
const idi num_queues,
const idi local_queue_length);
distf selecting_top_L_seq(
std::vector<Candidate> &set_L,
const idi global_L,
// const idi local_L,
const idi num_queues,
const std::vector<idi> &local_queues_starts,
std::vector<idi> &local_queues_sizes);
void selecting_unchecked_top_M_seq(
const idi query_id,
const idi iter,
std::vector<Candidate> &set_L,
const std::vector<idi> &pointers_starts,
const idi value_M,
const idi num_queues,
const std::vector<idi> &local_queues_starts,
const std::vector<idi> &local_queues_sizes,
std::vector<idi> &local_m_counts);
// idi merge_all_queues_all_together_in_sequential(
// std::vector<Candidate> &set_L,
// std::vector<idi> &local_queues_ends,
// const idi local_queue_length,
// const idi L);
// idi min_all_queues_at_heads(
// const std::vector<Candidate> &set_L,
// std::vector<idi> &queue_heads,
// const std::vector<idi> &local_queues_ends,
// const idi local_queue_length,
// const idi L);
public:
// For Profiling
// L3CacheMissRate cache_miss_kernel;
uint64_t count_distance_computation_ = 0;
uint64_t count_add_to_queue_ = 0;
// uint64_t count_single_query_computation_ = 0;
// distf dist_min_ = 0;
// distf dist_max_ = 0;
double time_merge_ = 0;
double time_select_ = 0;
// double time_select_L_ = 0.0;
// double time_select_M_ = 0.0;
double time_initialization_ = 0;
double time_sequential_phase_ = 0;
double time_parallel_phase_ = 0;
double time_ending_ = 0.0;
double time_assign_s_ = 0.0;
double time_expand_ = 0.0;
double time_pick_top_m_ = 0.0;
double time_distance_computation_ = 0.0;
double time_add_to_queue_ = 0.0;
// double time_insert_ = 0;
// double time_compare_minimum_ = 0;
// double time_memmove_ = 0;
// std::vector<double> time_memmove_list_;
// L3CacheMissRate profile_miss_rate;
// uint64_t number_local_elements_ = 0;
// std::vector<idi> L_ids_;
// std::vector<idi> M_ids_;
    // Releases the raw buffers this object owns. data_load_ / queries_load_ /
    // opt_nsg_graph_ are released with free(); presumably they were allocated
    // with malloc (or realloc) in the corresponding load_* routines — TODO
    // confirm, since the commented-out code hints an _mm_free variant was
    // used at some point. Pointers are nulled so a double-destroy is benign.
    ~Searching()
    {
        free(data_load_);
        data_load_ = nullptr;
//        free(queries_load_);
//        _mm_free(data_load_);
        free(queries_load_);
        queries_load_ = nullptr;
//        free(norms_);
//        free(nsg_graph_indices_);
//        free(nsg_graph_out_edges_);
        free(opt_nsg_graph_);
        opt_nsg_graph_ = nullptr;
    }
void load_data_load(char *filename);
void load_queries_load(char *filename);
void load_nsg_graph(char *filename);
// void build_opt_graph();
void prepare_init_ids(
std::vector<unsigned> &init_ids,
const unsigned L) const;
// void prepare_candidate_queue_list(
// const float *query_load,
// std::vector<std::vector<efanna2e::Neighbor> > &retset_list,
// std::vector<boost::dynamic_bitset<> > &is_visited_list,
// const std::vector<unsigned> &init_ids,
// const boost::dynamic_bitset<> &flags,
// unsigned batch_start,
// unsigned batch_size,
// unsigned L);
// void search_in_batch(
//// const float *query_load,
// size_t K,
// size_t L,
// unsigned batch_start,
// unsigned batch_size,
// std::vector< std::vector<Candidate> > &set_L_list,
// std::vector< boost::dynamic_bitset<> > &is_visited_list,
// const std::vector<idi> &init_ids,
// const boost::dynamic_bitset<> &is_visited,
// std::vector<std::vector<idi> > &set_K_list);
void search_in_sequential(
idi query_id,
idi K,
idi L,
std::vector<Candidate> &set_L,
// boost::dynamic_bitset<> &is_visited,
// boost::dynamic_bitset<> is_visited,
// std::vector<idi> &init_ids,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K);
// void search_in_sequential_BitVector(
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K);
// idi get_out_degree(idi v_id) const
// {
// if (v_id < num_v_ - 1) {
// return nsg_graph_indices_[v_id + 1] - nsg_graph_indices_[v_id];
// } else {
// return num_e_ - nsg_graph_indices_[v_id];
// }
// }
void search_with_top_m(
idi M,
idi query_id,
idi K,
idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K);
// std::vector< std::vector<idi> > &top_m_list);
void search_with_top_m_scale_m(
const PANNS::idi value_M_max,
const PANNS::idi query_id,
const PANNS::idi K,
const PANNS::idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
std::vector<idi> &top_m_candidates,
boost::dynamic_bitset<> &is_visited);
// void search_with_top_m_myths_M(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K);
// void search_with_top_m_to_get_distance_range(
// const PANNS::idi M,
// const PANNS::idi query_id,
//// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids);
// void search_with_top_m_profile_bit_CAS(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K);
// void search_with_top_m_no_local_arrays(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// boost::dynamic_bitset<> &is_visited);
void search_with_top_m_in_batch(
PANNS::idi M,
PANNS::idi batch_start,
PANNS::idi batch_size,
PANNS::idi K,
PANNS::idi L,
std::vector< std::vector<Candidate> > &set_L_list,
const std::vector<idi> &init_ids,
std::vector< std::vector<idi> > &set_K_list);
// void para_search_with_top_m_critical_area(
// idi M,
// idi query_id,
// idi K,
// idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K);
// void para_search_with_top_m_critical_area_no_omp(
// idi M,
// idi query_id,
// idi K,
// idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K);
// void para_search_with_top_m_critical_area_yes_omp(
// idi M,
// idi query_id,
// idi K,
// idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K);
// void para_search_with_top_m_visited_array(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// std::vector<uint8_t> &is_visited);
// void para_search_with_top_m_merge_queues(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K);
// void para_search_with_top_m_queues_seq_merge(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K);
// void para_search_with_top_m_merge_queues_no_CAS(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length,
// std::vector< std::vector<Candidate> > &local_queues_list,
// std::vector<idi> &local_queues_ends,
//// std::vector<uint8_t> &is_visited);
// boost::dynamic_bitset<> &is_visited);
// void para_search_with_top_m_merge_queues_in_array(
// void para_search_with_top_m_merge_queues_new_threshold(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
//// std::vector< std::vector<Candidate> > &local_queues_list,
// std::vector<Candidate> &local_queues_array,
// std::vector<idi> &local_queues_ends, // Sizes of local queue
// BitVector &is_visited);
// void para_search_with_top_m_merge_queues_by_sort(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
//// std::vector<Candidate> &local_queues_array,
// std::vector<idi> &local_queues_ends, // Sizes of local queue
// std::vector<idi> &dest_offsets,
// const std::vector<idi> &offsets_load_set_L, // Offsets for store into set_L.
// BitVector &is_visited);
// void para_search_with_top_m_merge_queues_better_merge_v0(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited);
// boost::dynamic_bitset<> &is_visited);
//// BitVector &is_visited);
// void para_search_with_top_m_merge_queues_better_merge_v2(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited,
// std::vector<distf> &local_thresholds);
//// BitVector &is_visited)
// void para_search_with_top_m_merge_queues_better_merge_v1(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// std::vector<idi> &local_queues_ends, // Sizes of local queue
// std::vector<Candidate> &top_m_candidates,
//// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited);
// boost::dynamic_bitset<> &is_visited);
//// BitVector &is_visited);
// void para_search_with_top_m_merge_queues_better_merge_v0_0(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited);
//// BitVector &is_visited)
// void para_search_with_top_m_merge_queues_less_merge(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited,
// std::vector<distf> &local_thresholds);
//// BitVector &is_visited)
// void para_search_with_top_m_merge_queues_no_merge(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited,
// std::vector<distf> &local_thresholds,
// const uint64_t computation_threshold);
// void para_search_with_top_m_merge_queues_scale_m_v0(
// const idi value_M_max,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited);
// boost::dynamic_bitset<> &is_visited);
void para_search_with_top_m_merge_queues_middle_m(
const idi value_M_middle,
const idi value_M_max,
const idi query_id,
const idi K,
const idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
const idi local_queue_length, // Maximum size of local queue
const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
std::vector<idi> &local_queues_ends, // Sizes of local queue
// std::vector<Candidate> &top_m_candidates,
std::vector<idi> &top_m_candidates,
// std::vector<uint8_t> &is_visited)
boost::dynamic_bitset<> &is_visited);
// std::vector<distf> &local_thresholds);
// BitVector &is_visited)
// void para_search_with_top_m_merge_queues_scale_m_v2(
// const idi value_M_min,
// const idi value_M_max,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited);
// void para_search_with_top_m_merge_queues_scale_m_v3(
// const idi value_M_middle,
// const idi value_M_max,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited);
void para_search_with_top_m_merge_queues_middle_m_no_merge(
const uint64_t computation_threshold,
const idi value_M_middle,
const idi value_M_max,
const idi query_id,
const idi K,
const idi L,
const idi init_size,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
const idi local_queue_length, // Maximum size of local queue
const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
std::vector<idi> &local_queues_ends, // Sizes of local queue
std::vector<idi> &top_m_candidates,
boost::dynamic_bitset<> &is_visited);
void para_search_with_top_m_merge_queues_sequential_merge(
const idi value_M_middle,
const idi value_M_max,
const idi query_id,
const idi K,
const idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
const idi local_queue_length, // Maximum size of local queue
const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
std::vector<idi> &local_queues_ends, // Sizes of local queue
std::vector<idi> &top_m_candidates,
boost::dynamic_bitset<> &is_visited);
void para_search_with_top_m_nested_para(
const idi batch_start,
const idi batch_size,
const idi value_M_middle,
const idi value_M_max,
const idi K,
const idi L,
std::vector< std::vector<Candidate> > &set_L_list,
const std::vector<idi> &init_ids,
std::vector< std::vector<idi> > &set_K_list,
const idi local_queue_length, // Maximum size of local queue
const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
std::vector< std::vector<idi> > &local_queues_ends_list, // Sizes of local queue
std::vector< std::vector<idi> > &top_m_candidates_list,
std::vector< boost::dynamic_bitset<> > &is_visited_list);
void subsearch_with_top_m(
const idi value_M_max,
const idi query_id,
const idi local_L,
std::vector<Candidate> &set_L,
const idi set_L_start,
idi &set_L_size,
std::vector<idi> &local_top_m_candidates,
boost::dynamic_bitset<> &is_visited,
uint64_t &local_count_distance_computation);
void subsearch_top_m_for_one_iteration(
const idi iter,
idi &k_uc,
const idi value_M,
const idi query_id,
const dataf *query_data,
const idi L,
std::vector<Candidate> &set_L,
const idi set_L_start,
idi &set_L_size,
std::vector<idi> &top_m_candidates,
boost::dynamic_bitset<> &is_visited,
uint64_t &count_distance_computation);
void subsearch_top_m_for_one_iteration_lth(
const distf bound_lth,
const idi iter,
idi &k_uc,
const idi value_M,
const idi query_id,
const dataf *query_data,
const idi L,
std::vector<Candidate> &set_L,
const idi set_L_start,
idi &set_L_size,
std::vector<idi> &top_m_candidates,
boost::dynamic_bitset<> &is_visited,
uint64_t &count_distance_computation);
void subsearch_top_m_for_one_iteration_lth_mth(
const distf bound_lth,
// const idi top_m_position,
const idi iter,
idi &k_uc,
const idi local_m_count,
const idi query_id,
const dataf *query_data,
const idi L,
std::vector<Candidate> &set_L,
const idi set_L_start,
idi &set_L_size,
std::vector<idi> &top_m_candidates,
boost::dynamic_bitset<> &is_visited,
uint64_t &count_distance_computation,
double &time_pick_top_m,
uint64_t &count_add_to_queue,
double &time_distance_computation,
double &time_add_to_queue);
void para_search_with_top_m_subsearch_v0(
// const idi value_M_middle,
const idi value_M_max,
const idi query_id,
const idi K,
const idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
std::vector< std::vector<idi> > &top_m_candidates_list,
boost::dynamic_bitset<> &is_visited);
void para_search_with_top_m_subsearch_v1(
const idi local_M_max,
const idi query_id,
const idi K,
const idi global_L,
const idi local_L,
const idi total_L,
const idi init_queue_end,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
const std::vector<idi> &local_queues_bases,
std::vector<idi> &local_queues_ends,
std::vector< std::vector<idi> > &top_m_candidates_list,
boost::dynamic_bitset<> &is_visited);
// void para_search_with_top_m_subsearch_v2(
// const idi local_M_max,
// const idi query_id,
// const idi K,
// const idi global_L,
// const idi local_L,
// const idi total_L,
// const idi init_queue_size,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const std::vector<idi> &local_queues_starts,
// std::vector<idi> &local_queues_sizes,
// std::vector<idi> &local_m_counts,
// std::vector< std::vector<idi> > &top_m_candidates_list,
// boost::dynamic_bitset<> &is_visited);
void para_search_with_top_m_subsearch_v3(
const idi local_M_max,
const idi local_M_middle,
const idi query_id,
const idi K,
const idi global_L,
const idi local_L,
// const idi total_L,
// const idi init_queue_size,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
const std::vector<idi> &local_queues_starts,
std::vector<idi> &local_queues_sizes,
std::vector<idi> &local_m_counts,
std::vector< std::vector<idi> > &top_m_candidates_list,
boost::dynamic_bitset<> &is_visited);
void subsearch_for_simple_search(
const idi query_id,
const idi local_L,
std::vector<Candidate> &set_L,
const idi base_set_L,
idi &set_L_end,
// std::vector<uint8_t> &is_visited,
boost::dynamic_bitset<> &is_visited,
uint64_t &local_count_distance_computation);
void para_simple_search_subsearch(
const idi query_id,
const idi K,
const idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
// std::vector<uint8_t> &is_visited);
boost::dynamic_bitset<> &is_visited);
// void para_search_with_top_m_merge_queues_global_threshold(
// const idi value_M_middle,
// const idi value_M_max,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
// std::vector<idi> &top_m_candidates,
// boost::dynamic_bitset<> &is_visited);
// void para_search_with_top_m_merge_queues_distance_threshold_m(
//// const idi value_M_middle,
//// const idi value_M_max,
// const distf relative_dist_threshold,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited);
//// std::vector<distf> &local_thresholds)
//// BitVector &is_visited)
// void para_search_with_top_m_merge_queues_distance_threshold_m_middle_iteration(
//// const idi value_M_middle,
//// const idi value_M_max,
// const distf relative_dist_threshold,
// const idi middle_iteration,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited);
// void para_search_with_top_m_merge_queues_collectors(
// const idi value_M_middle,
// const idi value_M_max,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited);
// void para_search_with_top_m_merge_queues_selecting(
// const idi value_M_middle,
// const idi value_M_max,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited);
// void para_search_with_top_m_merge_queues_myths(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
//// std::vector< std::vector<Candidate> > &local_queues_list,
// std::vector<Candidate> &local_queues_array,
// std::vector<idi> &local_queues_ends, // Sizes of local queue
// BitVector &is_visited);
//// std::vector<uint8_t> &is_visited);
//// boost::dynamic_bitset<> &is_visited);
//// void para_prepare_init_ids(
//// std::vector<unsigned> &init_ids,
//// unsigned L) const;
// void para_search_with_top_m_in_batch_embarassing_para(
// const PANNS::idi M,
// const PANNS::idi batch_start,
// const PANNS::idi batch_size,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector< std::vector<Candidate> > &set_L_list,
// const std::vector<idi> &init_ids,
// std::vector< std::vector<idi> > &set_K_list,
// std::vector< boost::dynamic_bitset<> > &is_visited_list);
// void test_neighbors_distance_to_father(
// const idi num_selected) const;
// void test_neighbors_normalized_distance_to_father(
// const idi num_selected) const;
void load_true_NN(
const char *filename,
std::vector< std::vector<idi> > &true_nn_list);
void get_recall_for_all_queries(
const std::vector< std::vector<idi> > &true_nn_list,
const std::vector<std::vector<unsigned>> &set_K_list,
std::unordered_map<unsigned, double> &recalls) const;
}; // Class Searching
/**
 * Load the base (data) vectors from file into data_load_.
 * Also cross-checks the vector dimension against a previously loaded
 * dimension (e.g. from the query file), exiting on a mismatch.
 * @param filename path of the base-data file
 */
inline void Searching::load_data_load(char *filename)
{
    // Dimension recorded by an earlier load (0 if nothing loaded yet).
    const auto previous_dimension = dimension_;

    DiskIO::load_data(
            filename,
            data_load_,
            num_v_,
            dimension_);

    // If a dimension was already known, the data just read must agree with it.
    if (previous_dimension && previous_dimension != dimension_) {
        std::cerr << "Error: data dimension " << dimension_
                  << " is not equal to query dimension " << previous_dimension << "." << std::endl;
        exit(EXIT_FAILURE);
    }
}
/**
 * Load the query vectors from file into queries_load_.
 * Also cross-checks the vector dimension against a previously loaded
 * dimension (e.g. from the base-data file), exiting on a mismatch.
 * @param filename path of the query file
 */
inline void Searching::load_queries_load(char *filename)
{
    // Dimension recorded by an earlier load (0 if nothing loaded yet).
    const auto previous_dimension = dimension_;

    DiskIO::load_data(
            filename,
            queries_load_,
            num_queries_,
            dimension_);

    // If a dimension was already known, the queries just read must agree with it.
    if (previous_dimension && previous_dimension != dimension_) {
        std::cerr << "Error: query dimension " << dimension_
                  << " is not equal to data dimension " << previous_dimension << "." << std::endl;
        exit(EXIT_FAILURE);
    }
}
/**
* Input the NSG graph from the file.
* Reference: https://github.com/ZJULearning/nsg/blob/master/src/index_nsg.cpp
* @param filename
*/
inline void Searching::load_nsg_graph(char *filename)
{
std::ifstream fin(filename);
if (!fin.is_open()) {
std::cerr << "Error: cannot read file " << filename << " ." << std::endl;
exit(EXIT_FAILURE);
}
fin.read(reinterpret_cast<char *>(&width_), sizeof(unsigned));
fin.read(reinterpret_cast<char *>(&ep_), sizeof(unsigned));
data_bytes_ = (1 + dimension_) * sizeof(dataf);
neighbor_bytes_ = (1 + width_) * sizeof(idi);
vertex_bytes_ = data_bytes_ + neighbor_bytes_;
opt_nsg_graph_ = (char *) malloc(num_v_ * vertex_bytes_);
if (!opt_nsg_graph_) {
std::cerr << "Error: no enough memory for opt_nsg_graph_." << std::endl;
exit(EXIT_FAILURE);
}
idi v_id = 0;
num_e_ = 0;
char *base_location = opt_nsg_graph_;
while (true) {
idi degree;
fin.read(reinterpret_cast<char *>(°ree), sizeof(unsigned));
if (fin.eof()) {
break;
}
num_e_ += degree;
// std::vector<idi> tmp_ngbrs(degree);
// fin.read(reinterpret_cast<char *>(tmp_ngbrs.data()), degree * sizeof(unsigned));
// Norm and data
distf norm = compute_norm(data_load_ + v_id * dimension_);
// distf norm = compute_norm(v_id);
std::memcpy(base_location, &norm, sizeof(distf)); // Norm
memcpy(base_location + sizeof(distf), data_load_ + v_id * dimension_, dimension_ * sizeof(dataf)); // Data
base_location += data_bytes_;
// Neighbors
memcpy(base_location, °ree, sizeof(idi)); // Number of neighbors
fin.read(base_location + sizeof(idi), degree * sizeof(unsigned)); // Neighbors
// memcpy(location + sizeof(idi), tmp_ngbrs.data(), degree * sizeof(unsigned));
base_location += neighbor_bytes_;
++v_id;
}
if (v_id != num_v_) {
std::cerr << "Error: NSG data has " << v_id
<< " vertices, but origin data has " << num_v_ << " vertices." << std::endl;
exit(EXIT_FAILURE);
}
free(data_load_);
data_load_ = nullptr;
// ////////////////////////
// idi v_id = 0;
// num_e_ = 0;
// while (true) {
// idi degree;
// fin.read(reinterpret_cast<char *>(°ree), sizeof(unsigned));
// if (fin.eof()) {
// break;
// }
// num_e_ += degree;
//
// std::vector<idi> ngbrs(degree);
// fin.read(reinterpret_cast<char *>(ngbrs.data()), degree * sizeof(unsigned));
//// nsg_graph_.push_back(ngbrs);
//// tmp_edge_list.push_back(ngbrs);
// edge_list_.push_back(ngbrs);
// ++v_id;
// }
// if (v_id != num_v_) {
// std::cerr << "Error: NSG data has " << v_id
// << " vertices, but origin data has " << num_v_ << " vertices." << std::endl;
// exit(EXIT_FAILURE);
// }
}
/**
* Load those true top-K neighbors (ground truth) of queries
* @param filename
* @param[out] true_nn_list
*/
inline void Searching::load_true_NN(
const char *filename,
std::vector< std::vector<idi> > &true_nn_list)
// unsigned &t_K)
{
std::ifstream fin(filename);
if (!fin.is_open()) {
fprintf(stderr, "Error: cannot open file %s\n", filename);
exit(EXIT_FAILURE);
}
idi t_query_num;
idi t_K;
// unsigned t_K;
fin.read(reinterpret_cast<char *>(&t_query_num), sizeof(t_query_num));
fin.read(reinterpret_cast<char *>(&t_K), sizeof(t_K));
// if (t_query_num != query_num) {
// fprintf(stderr, "Error: query_num %u is not equal to the record %u in true-NN file %s\n",
// query_num, t_query_num, filename);
// exit(EXIT_FAILURE);
// }
if (t_query_num < num_queries_) {
fprintf(stderr, "Error: t_query_num %u is smaller than num_queries_ %u\n", t_query_num, num_queries_);
exit(EXIT_FAILURE);
}
if (t_K < 100) {
fprintf(stderr, "Error: t_K %u is smaller than 100.\n", t_K);
exit(EXIT_FAILURE);
}
// data = new unsigned[(size_t) t_query_num * (size_t) t_K];
true_nn_list.resize(t_query_num);
for (idi q_i = 0; q_i < t_query_num; ++q_i) {
true_nn_list[q_i].resize(t_K);
}
for (unsigned q_i = 0; q_i < t_query_num; ++q_i) {
// size_t offset = q_i * t_K;
for (unsigned n_i = 0; n_i < t_K; ++n_i) {
unsigned id;
float dist;
fin.read(reinterpret_cast<char *>(&id), sizeof(id));
fin.read(reinterpret_cast<char *>(&dist), sizeof(dist));
// data[offset + n_i] = id;
true_nn_list[q_i][n_i] = id;
}
}
fin.close();
}
/**
 * Compute recall@{1,5,10,20,50,100} of set_K_list against the ground truth.
 * For every query, each of the top-100 true neighbors is searched for in the
 * first 100 returned ids; a hit at position n counts toward every cutoff
 * larger than n. Results are normalized by cutoff * num_queries_.
 * @param true_nn_list ground-truth ids per query (at least 100 per query)
 * @param set_K_list returned ids per query
 * @param[out] recalls recalls[c] = recall at cutoff c
 */
inline void Searching::get_recall_for_all_queries(
        const std::vector< std::vector<idi> > &true_nn_list,
        const std::vector<std::vector<unsigned>> &set_K_list,
        std::unordered_map<unsigned, double> &recalls) const
{
    if (true_nn_list[0].size() < 100) {
        fprintf(stderr, "Error: Number of true nearest neighbors of a query is smaller than 100.\n");
        exit(EXIT_FAILURE);
    }
    const unsigned cutoffs[] = {1, 5, 10, 20, 50, 100};
    for (unsigned c : cutoffs) {
        recalls[c] = 0.0;
    }
    for (unsigned q_i = 0; q_i < num_queries_; ++q_i) {
        for (unsigned top_i = 0; top_i < 100; ++top_i) {
            const unsigned true_id = true_nn_list[q_i][top_i];
            for (unsigned n_i = 0; n_i < 100; ++n_i) {
                if (set_K_list[q_i][n_i] != true_id) {
                    continue;
                }
                // A hit at position n_i counts for every cutoff beyond it.
                for (unsigned c : cutoffs) {
                    if (n_i < c) {
                        recalls[c] += 1;
                    }
                }
            }
        }
    }
    for (unsigned c : cutoffs) {
        recalls[c] /= 1.0 * c * num_queries_;
    }
}
/**
 * Sequential (single-thread) best-first NSG search for one query.
 * Maintains a sorted candidate queue set_L of length L; repeatedly expands
 * the first unchecked candidate and inserts its unvisited neighbors, then
 * writes the ids of the best K candidates into set_K.
 * @param query_id index of the query inside queries_load_
 * @param K number of nearest neighbors to report (K <= L)
 * @param L search-queue length; larger L gives higher recall, more work
 * @param set_L candidate queue; assumed pre-sized to hold at least L entries
 * @param init_ids the L starting vertex ids (see prepare_init_ids())
 * @param[out] set_K receives the ids of the top-K candidates found
 */
inline void Searching::search_in_sequential(
        const idi query_id,
        const idi K,
        const idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K)
{
//    {//test
//        printf("Iteration: Relative_Distance:\n");
////        printf("Iteration: Relative_Distance:\n");
////        printf("----query: %u----\n", query_id);
//    }
    boost::dynamic_bitset<> is_visited(num_v_);

    // Mark the initial candidates visited so they are never re-inserted.
    for (idi v_i = 0; v_i < L; ++v_i) {
        is_visited[init_ids[v_i]] = true;
    }
    const dataf *query_data = queries_load_ + query_id * dimension_;

    // Warm the cache: each vertex record (norm | data | neighbors) is one
    // contiguous chunk of opt_nsg_graph_.
    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    // Get the distances of all candidates, store in the set set_L.
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        // First field of the vertex record is the precomputed norm; the
        // vector data follows immediately after.
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++;
        ++count_distance_computation_;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    }
    std::sort(set_L.begin(), set_L.begin() + L);
    idi k = 0; // Index of every queue's first unchecked candidate.
    idi tmp_count = 0; // for debug
//    {// Print relative distance
////        distf top_dist = set_L[0].distance_;
//        for (idi i_l = 0; i_l < L; ++i_l) {
//            printf("%u %f\n",
//                    tmp_count, set_L[i_l].distance_);
////                    tmp_count, set_L[i_l].distance_ - top_dist);
//        }
//    }
    while (k < L) {
        Candidate &top_cand = set_L[k];
        unsigned nk = L; // lowest queue position any neighbor was inserted at this round
        if (!top_cand.is_checked_) {
            ++tmp_count;

            top_cand.is_checked_ = true;
            idi v_id = top_cand.id_; // Vertex ID.
            _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
            // The neighbor list sits right behind the data block:
            // out-degree first, then the neighbor ids.
            idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_);
            idi out_degree = *out_edges++;
            for (idi n_i = 0; n_i < out_degree; ++n_i) {
                _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
            }
            // Traverse v_id's all neighbors, pushing them into the queue
            for (idi e_i = 0; e_i < out_degree; ++e_i) {
                idi nb_id = out_edges[e_i];
                if (is_visited[nb_id]) {
                    continue;
                }
                is_visited[nb_id] = true;
                auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                dataf norm = *nb_data++;
                // Compute the distance
                ++count_distance_computation_;
                distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                // Worse than the current queue tail: cannot enter the queue.
                if (dist > set_L[L-1].distance_) {
                    continue;
                }
//                if (dist >= set_L[L-1].distance_) {
//                    continue;
//                }
                Candidate cand(nb_id, dist, false);
                // Insert into the queue; r is the position it landed at.
                idi r = insert_into_queue(set_L, L, cand);
                if (r < nk) {
                    nk = r;
                }
            }
//            {// Print relative distance
////                distf top_dist = set_L[0].distance_;
//                for (idi i_l = 0; i_l < L; ++i_l) {
//                    printf("%u %f\n",
//                            tmp_count, set_L[i_l].distance_);
////                            tmp_count, set_L[i_l].distance_ - top_dist);
//                }
//            }
        }
        // If a neighbor was inserted at or before position k, back up to
        // re-examine it; otherwise advance to the next candidate.
        if (nk <= k) {
            k = nk;
        } else {
            ++k;
        }
    }
//    cache_miss_kernel.measure_stop();
    // Emit the ids of the K best candidates.
    for (size_t k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i].id_;
    }
//    {//test
//        if (0 == query_id) {
//            exit(1);
//        }
//    }
}
//inline void Searching::search_in_sequential_BitVector(
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K)
//{
//// boost::dynamic_bitset<> is_visited(num_v_);
// BitVector is_visited(num_v_);
//
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
//// is_visited[init_ids[v_i]] = true;
// is_visited.atomic_set_bit(init_ids[v_i]);
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
// idi k = 0; // Index of every queue's first unchecked candidate.
// while (k < L) {
// Candidate &top_cand = set_L[k];
// unsigned nk = L;
// if (!top_cand.is_checked_) {
// top_cand.is_checked_ = true;
// idi v_id = top_cand.id_; // Vertex ID.
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// // Traverse v_id's all neighbors, pushing them into the queue
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
//// if (is_visited[nb_id]) {
//// continue;
//// }
//// is_visited[nb_id] = true;
//
// {// Self-defined BitVector
// if (is_visited.atomic_is_bit_set(nb_id)) {
// continue;
// }
// is_visited.atomic_set_bit(nb_id);
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// // Compute the distance
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// // Insert into the queue
// idi r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// if (nk <= k) {
// k = nk;
// } else {
// ++k;
// }
// }
//// cache_miss_kernel.measure_stop();
//#pragma omp parallel for
// for (size_t k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
//}
/**
* Prepare init_ids and flags, as they are constant for all queries.
* @param[out] init_ids
* @param L
*/
inline void Searching::prepare_init_ids(
std::vector<unsigned int> &init_ids,
const unsigned L) const
{
// idi num_ngbrs = get_out_degree(ep_);
// edgei edge_start = nsg_graph_indices_[ep_];
// // Store ep_'s neighbors as candidates
// idi tmp_l = 0;
// for (; tmp_l < L && tmp_l < num_ngbrs; tmp_l++) {
// init_ids[tmp_l] = nsg_graph_out_edges_[edge_start + tmp_l];
// }
// std::unordered_set<idi> visited_ids;
boost::dynamic_bitset<> is_selected(num_v_);
idi *out_edges = (idi *) (opt_nsg_graph_ + ep_ * vertex_bytes_ + data_bytes_);
idi out_degree = *out_edges++;
idi init_ids_end = 0;
// for (; tmp_l < L && tmp_l < out_degree; tmp_l++) {
for (idi e_i = 0; e_i < out_degree && init_ids_end < L; ++e_i) {
// idi v_id = out_edges[tmp_l];
idi v_id = out_edges[e_i];
if(is_selected[v_id]) {
continue;
}
is_selected[v_id] = true;
// init_ids[tmp_l] = v_id;
init_ids[init_ids_end++] = v_id;
// init_ids[tmp_l] = out_edges[tmp_l];
// visited_ids.insert(init_ids[tmp_l]);
}
// for (idi i = 0; i < tmp_l; ++i) {
// is_visited[init_ids[i]] = true;
// }
// If ep_'s neighbors are not enough, add other random vertices
idi tmp_id = ep_ + 1; // use tmp_id to replace rand().
while (init_ids_end < L) {
tmp_id %= num_v_;
idi v_id = tmp_id++;
if (is_selected[v_id]) {
continue;
}
// if (visited_ids.find(id) != visited_ids.end()) {
// continue;
// }
is_selected[v_id] = true;
// visited_ids.insert(id);
init_ids[init_ids_end++] = v_id;
// tmp_l++;
}
}
// TODO: re-code in AVX-512
/**
 * Compute the squared L2 norm (sum of squares) of one vector using AVX.
 * @param data pointer to the vector's floats
 * @return sum of squared components (no sqrt is taken)
 *
 * NOTE(review): dimension_ is rounded up to a multiple of 8, so when it is
 * not itself a multiple of 8 the tail load reads past the logical vector —
 * presumably the buffers are padded/over-allocated to allow this; confirm
 * against the allocation in DiskIO::load_data / load_nsg_graph.
 */
inline dataf Searching::compute_norm(
        const dataf *data) const
//        idi vertex_id)
//        const std::vector<PANNS::dataf> &data)
//        size_t loc_start,
//        idi dimension)
{
//    const dataf *a = data.data() + loc_start;
//    const dataf *a = data_load_ + vertex_id * dimension_;
//    idi size = dimension_;
    dataf result = 0;
//#define AVX_L2NORM(addr, dest, tmp) \
//    tmp = _mm256_load_ps(addr); \
//    tmp = _mm256_mul_ps(tmp, tmp); \
//    dest = _mm256_add_ps(dest, tmp);

// Accumulate the squares of 8 floats at addr into dest (unaligned load,
// since vertex records inside opt_nsg_graph_ are not 32-byte aligned).
// NOTE(review): this macro is not #undef'ed after use.
#define AVX_L2NORM(addr, dest, tmp) \
    tmp = _mm256_loadu_ps(addr); \
    tmp = _mm256_mul_ps(tmp, tmp); \
    dest = _mm256_add_ps(dest, tmp);

    __m256 sum;
    __m256 l0, l1;
    // D: dimension rounded up to a multiple of 8 (one AVX lane).
    // DD: the part processed two lanes (16 floats) per iteration;
    // DR: the remaining 0 or 8 floats handled by the single tail load.
    unsigned D = (dimension_ + 7) & ~7U;
    unsigned DR = D % 16;
    unsigned DD = D - DR;
    const float *l = data;
    const float *e_l = l + DD;
    float unpack[8] __attribute__ ((aligned (32))) = {0, 0, 0, 0, 0, 0, 0, 0};

    sum = _mm256_load_ps(unpack); // zero the accumulator
//    sum = _mm256_loadu_ps(unpack);

    if (DR) { AVX_L2NORM(e_l, sum, l0); } // tail lane of 8 floats
    for (unsigned i = 0; i < DD; i += 16, l += 16) {
        AVX_L2NORM(l, sum, l0);
        AVX_L2NORM(l + 8, sum, l1);
    }
    // Horizontal reduction of the 8 partial sums.
    _mm256_store_ps(unpack, sum);
    result = unpack[0] + unpack[1] + unpack[2] + unpack[3] + unpack[4] + unpack[5] + unpack[6] + unpack[7];

    return result;
}
/**
 * Compute a squared-L2 distance surrogate between a vertex and a query:
 * returns -2 * dot(v_data, q_data) + vertex_norm. The query's own squared
 * norm is omitted — it is constant per query, so rankings are unaffected;
 * NOTE(review): confirm callers only ever compare these values within the
 * same query.
 * @param v_data vertex vector (assumed padded to a multiple of 8 floats)
 * @param q_data query vector (same padding assumption)
 * @param vertex_norm precomputed squared norm of v_data
 */
inline dataf Searching::compute_distance_with_norm(
        const dataf *v_data,
        const dataf *q_data,
//        idi vertex_id,
//        idi query_id,
//        const std::vector<PANNS::dataf> &d_data,
//        const std::vector<PANNS::dataf> &q_data,
//        PANNS::idi d_start,
//        PANNS::idi q_start,
        const dataf vertex_norm) const
//        idi dimension)
{
//    idi size = dimension_;
    float result = 0;
//#define AVX_DOT(addr1, addr2, dest, tmp1, tmp2) \
//    tmp1 = _mm256_load_ps(addr1);\
//    tmp2 = _mm256_load_ps(addr2);\
//    tmp1 = _mm256_mul_ps(tmp1, tmp2); \
//    dest = _mm256_add_ps(dest, tmp1);
    // Accumulate the products of 8 float pairs into dest (unaligned loads).
#define AVX_DOT(addr1, addr2, dest, tmp1, tmp2) \
    tmp1 = _mm256_loadu_ps(addr1);\
    tmp2 = _mm256_loadu_ps(addr2);\
    tmp1 = _mm256_mul_ps(tmp1, tmp2); \
    dest = _mm256_add_ps(dest, tmp1);
    __m256 sum;
    __m256 l0, l1;
    __m256 r0, r1;
    // D: dimension_ rounded up to a multiple of 8; DR: leftover 8-float lane;
    // DD: bulk length, a multiple of 16 floats.
    unsigned D = (dimension_ + 7) & ~7U;
    unsigned DR = D % 16;
    unsigned DD = D - DR;
    const float *l = v_data;
    const float *r = q_data;
//    const float *l = (float *) (opt_nsg_graph_ + vertex_id * vertex_bytes_ + sizeof(distf));
//    const float *r = queries_load_ + query_id * dimension_;
    const float *e_l = l + DD;
    const float *e_r = r + DD;
    float unpack[8] __attribute__ ((aligned (32))) = {0, 0, 0, 0, 0, 0, 0, 0};
    sum = _mm256_load_ps(unpack);
//    sum = _mm256_loadu_ps(unpack);
    // Process the 8-float tail first, then the 16-floats-per-iteration bulk.
    if (DR) { AVX_DOT(e_l, e_r, sum, l0, r0); }
    for (unsigned i = 0; i < DD; i += 16, l += 16, r += 16) {
        AVX_DOT(l, r, sum, l0, r0);
        AVX_DOT(l + 8, r + 8, sum, l1, r1);
    }
    _mm256_store_ps(unpack, sum);
//    _mm256_storeu_ps(unpack, sum);
    // Horizontal reduction of the 8 partial dot products.
    result = unpack[0] + unpack[1] + unpack[2] + unpack[3] + unpack[4] + unpack[5] + unpack[6] + unpack[7];
    // ||v - q||^2 = ||v||^2 - 2 v.q + ||q||^2; the constant ||q||^2 is dropped.
    result = -2 * result + vertex_norm;
    return result;
}
//// DEPRECATED.
// The difference from insert_into_queue is that add_into_queue will increase the queue size by 1.
//inline idi Searching::add_into_queue(
// std::vector<PANNS::Candidate> &queue,
// idi &queue_top,
// const idi queue_size,
// const PANNS::Candidate &cand)
//{
// assert(queue_size > 1);
// if (0 == queue_top) {
// queue[queue_top++] = cand;
// return 0;
// } else if (1 == queue_top) {
// if (queue[0] < cand) {
// queue[queue_top++] = cand;
// return 1;
// } else {
// queue[++queue_top] = queue[0];
// queue[0] = cand;
// return 0;
// }
// }
//
// if (queue[queue_top - 1] < cand) {
// if (queue_top < queue_size) {
// queue[queue_top++] = cand;
// }
// return queue_top;
// }
//
// idi r = insert_into_queue(
// queue,
// queue_top - 1,
// cand);
//// {//test
//// printf("r: %u"
//// "queue_top: %u "
//// "queue_size: %u\n",
//// r,
//// queue_top,
//// queue_size);
//// }
// return r;
//
//// /////////////////////////////////////////////////////////////
//// // Find the insert location
//// auto it_loc = std::lower_bound(queue.begin(), queue.begin() + queue_top, cand);
//// idi insert_loc = it_loc - queue.begin();
//// if (insert_loc == queue_size) {
//// return queue_size;
//// }
////
//// // Insert
////// if (queue_top == queue_size) {
////// // If full already
////// --queue_top;
////// }
//// memmove(reinterpret_cast<char *>(queue.data() + insert_loc + 1),
//// reinterpret_cast<char *>(queue.data() + insert_loc),
//// (queue_top - insert_loc) * sizeof(Candidate));
////// for (idi q_i = queue_top; q_i > insert_loc; --q_i) {
////// queue.at(q_i) = queue.at(q_i - 1);
////// }
//// queue[insert_loc] = cand;
//// ++queue_top;
//// return insert_loc;
//}
// Unlike insert_into_queue, this function grows the queue by one element
// (unless the queue is already at queue_size, in which case the tail is
// evicted to make room). Returns the insertion position, or queue_size if
// cand ranked past the end of a full queue.
inline idi Searching::add_into_queue(
        std::vector<PANNS::Candidate> &queue,
        idi &queue_top,
        const idi queue_size,
        const PANNS::Candidate &cand)
{
    // Empty queue: cand becomes the sole element.
    if (queue_top == 0) {
        queue[0] = cand;
        queue_top = 1;
        return 0;
    }
    // Binary-search the sorted prefix [0, queue_top) for the insertion point.
    const auto first = queue.begin();
    const idi pos = static_cast<idi>(std::lower_bound(first, first + queue_top, cand) - first);
    if (pos == queue_size) {
        // cand ranks below every element of a full queue; reject it.
        return queue_size;
    }
    if (queue_top == queue_size) {
        // Queue already full: drop the last element to make room.
        --queue_top;
    }
    // Shift [pos, queue_top) one slot to the right, then place cand.
    std::copy_backward(first + pos, first + queue_top, first + queue_top + 1);
    queue[pos] = cand;
    ++queue_top;
    return pos;
}
// The difference from insert_into_queue is that add_into_queue will increase the queue size by 1.
// add_into_queue with a queue_start
/**
 * Insert cand into the sorted window [queue_start, queue_start + queue_size)
 * of queue, keeping it sorted and capped at queue_capacity elements (the tail
 * is evicted when full).
 * @return insertion position RELATIVE to queue_start, or queue_capacity when
 *         cand was rejected (duplicate of the element at its insertion point,
 *         or the queue is full and cand ranks past its end).
 */
inline idi Searching::add_into_queue(
        std::vector<PANNS::Candidate> &queue,
        const idi queue_start,
        idi &queue_size, // The insertion location starting from queue_start
        const idi queue_capacity, // The maximum capacity of queue, independent with queue_start.
        const PANNS::Candidate &cand)
{
    if (0 == queue_size) {
        queue[queue_start + queue_size++] = cand;
        return 0;
    }
    idi queue_end = queue_start + queue_size;
    // Find the insert location (absolute index into queue).
    const auto it_loc = std::lower_bound(queue.begin() + queue_start, queue.begin() + queue_end, cand);
//    auto it_loc = std::lower_bound(queue.begin(), queue.begin() + queue_size, cand);
    idi insert_loc = it_loc - queue.begin();
    if (insert_loc != queue_end) {
        // NOTE(review): only the element at the insertion point is compared;
        // presumably equal-distance entries with the same id always land here —
        // confirm Candidate's ordering guarantees that.
        if (cand.id_ == it_loc->id_) {
            // Duplicate
            return queue_capacity;
        }
        if (queue_size >= queue_capacity) { // Queue is full
            // Evict the tail element to keep the capacity invariant.
            --queue_size;
            --queue_end;
        }
    } else { // insert_loc == queue_end, insert at the end?
        if (queue_size < queue_capacity) { // Queue is not full
            // Insert at the end
            queue[insert_loc] = cand;
            ++queue_size;
            return queue_size - 1;
        } else { // Queue is full
            return queue_capacity;
        }
    }
    // Shift [insert_loc, queue_end) one slot right, then place cand.
    memmove(reinterpret_cast<char *>(queue.data() + insert_loc + 1),
            reinterpret_cast<char *>(queue.data() + insert_loc),
            (queue_end - insert_loc) * sizeof(Candidate));
    queue[insert_loc] = cand;
    ++queue_size;
    return insert_loc - queue_start;
}
inline void Searching::add_into_queue_at(
const Candidate &cand,
std::vector<Candidate> &queue,
const idi insert_index, // The insertion location, independent with queue_start
const idi queue_start,
idi &queue_size, // The number of elements in queue, independent with queue_start
const idi queue_length) // The maximum capacity of queue, independent with queue_start.
{
const idi dest_index = queue_start + insert_index;
if (queue_size == queue_length) {
--queue_size;
}
memmove(reinterpret_cast<char *>(queue.data() + dest_index + 1),
reinterpret_cast<char *>(queue.data() + dest_index),
(queue_size - insert_index) * sizeof(Candidate));
queue[dest_index] = cand;
++queue_size;
}
inline void Searching::insert_one_element_at(
// const T &cand,
// T *queue_base,
const Candidate &cand,
std::vector<Candidate> &queue,
const idi insert_index,
const idi queue_start,
const idi queue_size)
{
const idi dest_index = queue_start + insert_index;
memmove(reinterpret_cast<char *>(queue.data() + dest_index + 1),
reinterpret_cast<char *>(queue.data() + dest_index),
(queue_size - insert_index - 1) * sizeof(Candidate));
queue[dest_index] = cand;
// memmove(reinterpret_cast<char *>(queue_base + dest_index + 1),
// reinterpret_cast<char *>(queue_base + dest_index),
// (queue_size - insert_index - 1) * sizeof(T));
// for (idi q_i = queue_size - 1; q_i > insert_index; --q_i) {
// queue_base.at(q_i + queue_start) = queue_base.at(q_i - 1 + queue_start);
// }
// queue_base[dest_index] = cand;
}
/**
 * PANNS version of InsertIntoPool(): binary-search to find the insert place and then move.
 * The queue size stays c_queue_top; ties on distance are broken by smaller id.
 * NOTE(review): assumes cand.distance_ <= c_queue[c_queue_top - 1].distance_
 * (callers appear to pre-filter); otherwise cand is written one past the top
 * and c_queue_top is returned — confirm the vector has spare capacity.
 * @param[out] c_queue sorted candidate queue (ascending by distance)
 * @param c_queue_top number of valid elements in c_queue
 * @param cand candidate to insert
 * @return the insertion index, or c_queue_top if cand was not placed
 */
inline idi Searching::insert_into_queue(
        std::vector<PANNS::Candidate> &c_queue,
        PANNS::idi c_queue_top,
        PANNS::Candidate cand)
{
    if (c_queue[0].distance_ > cand.distance_) {
        // If the first: shift everything right and put cand at the head.
        memmove(reinterpret_cast<char *>(c_queue.data() + 1),
                reinterpret_cast<char *>(c_queue.data()),
                c_queue_top * sizeof(Candidate));
        c_queue[0] = cand;
        return 0;
    } else if (c_queue[c_queue_top - 1].distance_ == cand.distance_) {
        // If the last: same distance as the current tail.
        if (c_queue[c_queue_top - 1].id_ > cand.id_) {
            // Use ID as the second metrics for ordering
            c_queue[c_queue_top - 1] = cand;
            return c_queue_top - 1;
        } else {
            return c_queue_top;
        }
    }
    // Binary search: first index whose distance exceeds cand's.
    idi left = 0;
    idi right = c_queue_top;
    while (left < right) {
        idi mid = (right - left) / 2 + left;
        if (c_queue[mid].distance_ > cand.distance_) {
            right = mid;
        } else {
            left = mid + 1;
        }
    }
    // If the distance is the same, walk left across the equal-distance run
    // until id order is satisfied.
    if (0 != left && c_queue[left - 1].distance_ != cand.distance_) {
        ;
    } else {
        while (0 != left
               && c_queue[left - 1].distance_ == cand.distance_
               && c_queue[left - 1].id_ > cand.id_) {
            // Use ID as the second metrics for ordering
            --left;
        }
    }
    // Insert to left: shift [left, c_queue_top) right one slot.
    memmove(reinterpret_cast<char *>(c_queue.data() + left + 1),
            reinterpret_cast<char *>(c_queue.data() + left),
            (c_queue_top - left) * sizeof(Candidate));
    c_queue[left] = cand;
    return left;
}
//inline void Searching::cand_pushes_ngbrs_into_queue(
// idi cand_id,
// const dataf *query_data,
// idi L,
// idi &new_k,
// boost::dynamic_bitset<> &is_visited,
// std::vector<Candidate> &set_L)
//{
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = true;
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist >= set_L[L-1].distance_) {
// continue;
// }
// Candidate cand(nb_id, dist, false);
// idi r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
//}
//inline void Searching::search_in_sequential(
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K) const
//{
// boost::dynamic_bitset<> is_visited(num_v_);
//
// for (idi v_i = 0; v_i < L; ++v_i) {
// is_visited[init_ids[v_i]] = true;
// }
// const dataf *query_data = queries_load_ + query_id * dimension_;
//
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
// idi k = 0; // Index of every queue's first unchecked candidate.
// while (k < L) {
// Candidate &top_cand = set_L[k];
// unsigned nk = L;
// if (!top_cand.is_checked_) {
// top_cand.is_checked_ = true;
// idi v_id = top_cand.id_; // Vertex ID.
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// // Traverse v_id's all neighbors, pushing them into the queue
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = true;
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// // Compute the distance
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
// Candidate cand(nb_id, dist, false);
// // Insert into the queue
// idi r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// if (nk <= k) {
// k = nk;
// } else {
// ++k;
// }
// }
//
// for (size_t k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
//}
// Deprecated: cannot use std::set, because its element is constant.
//inline void Searching::search_in_sequential(
// const idi query_id,
// const idi K,
// const idi L,
//// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K) const
//{
// std::set<Candidate> set_L;
// boost::dynamic_bitset<> is_visited(num_v_);
//
// for (idi v_i = 0; v_i < L; ++v_i) {
// is_visited[init_ids[v_i]] = true;
// }
// const dataf *query_data = queries_load_ + query_id * dimension_;
//
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
//// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// set_L.emplace(v_id, dist, false);
// }
//// std::sort(set_L.begin(), set_L.begin() + L);
// idi k = 0; // Index of every queue's first unchecked candidate.
// while (k < L) {
//// Candidate &top_cand = set_L[k];
// std::set<Candidate>::iterator top_cand = std::next(set_L.begin(), k);
// unsigned nk = L;
// if (!top_cand->is_checked_) {
// top_cand->is_checked_ = true;
// idi v_id = top_cand.id_; // Vertex ID.
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// // Traverse v_id's all neighbors, pushing them into the queue
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = true;
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// // Compute the distance
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
// Candidate cand(nb_id, dist, false);
// // Insert into the queue
// idi r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// if (nk <= k) {
// k = nk;
// } else {
// ++k;
// }
// }
//
// for (size_t k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
//}
/* Function:
 * Merge sorted queue2 into sorted queue1 in place, keeping queue1 at its fixed
 * size queue1_size (elements pushed past the end are discarded). Elements with
 * equal rank and matching position are treated as duplicates and skipped.
 * Returns the lowest index (relative to queue1_start) at which anything was
 * inserted, i.e. the highest-priority change; queue1_size if nothing changed.
 * queue1_size is fixed.
 */
inline idi Searching::merge_two_queues_into_1st_queue_seq_fixed(
        std::vector<Candidate> &queue1,
        const idi queue1_start,
        const idi queue1_size,
        std::vector<Candidate> &queue2,
        const idi queue2_start,
        const idi queue2_size)
//        const idi limit_size)
{
    assert(queue1_size && queue2_size);
    // Record the lowest insert location: where queue2's best element lands.
    auto it_loc = std::lower_bound(
            queue1.begin() + queue1_start,
            queue1.begin() + queue1_start + queue1_size,
            queue2[queue2_start]);
    idi insert_index = it_loc - (queue1.begin() + queue1_start);
    if (insert_index == queue1_size) {
        // Every element of queue2 ranks below queue1's tail; nothing to merge.
        return insert_index;
    } else if (insert_index == queue1_size - 1) {
        // Only the tail slot can change: overwrite it directly.
        queue1[queue1_start + insert_index] = queue2[queue2_start];
        return insert_index;
    }
    // Insert the 1st of queue2
    if (queue2[queue2_start].id_ != it_loc->id_) {
        // Not Duplicate
        insert_one_element_at(
                queue2[queue2_start],
                queue1,
                insert_index,
                queue1_start,
                queue1_size);
    }
    if (queue2_size == 1) {
        return insert_index;
    }
    // Merge the remaining elements with a two-pointer walk; insert_i tracks
    // the current merge frontier inside queue1.
    idi q_i_1 = insert_index + 1 + queue1_start;
    idi q_i_2 = queue2_start + 1;
    const idi q_i_1_bound = queue1_start + queue1_size;
    const idi q_i_2_bound = queue2_start + queue2_size;
//    const idi insert_i_bound = queue1_start + limit_size;
    for (idi insert_i = insert_index + 1; insert_i < queue1_size; ++insert_i) {
        if (q_i_1 >= q_i_1_bound || q_i_2 >= q_i_2_bound) {
            // queue1 or queue2 finished traversing. The rest of queue2 (if any)
            // cannot fit into queue1's fixed size, so it is dropped.
            break;
        } else if (queue1[q_i_1] < queue2[q_i_2]) {
            // queue1's element wins this slot; advance in queue1 only.
            ++q_i_1;
        } else if (queue2[q_i_2] < queue1[q_i_1]) {
            // Insert queue2[q_i_2] into queue1
            insert_one_element_at(
                    queue2[q_i_2++],
                    queue1,
                    insert_i,
                    queue1_start,
                    queue1_size);
            ++q_i_1;
        } else {
            // Duplicate (neither orders before the other): skip both.
            ++q_i_2;
            ++q_i_1;
        }
    }
    return insert_index;
}
/* Function:
 * Merge sorted queue2 into sorted queue1 in place. Unlike the _fixed variant,
 * queue1 may GROW up to queue1_length elements:
 * queue1_size should be updated.
 * queue1_length should be provided.
 */
inline void Searching::merge_two_queues_into_1st_queue_seq_incr(
        std::vector<Candidate> &queue1,
        const idi queue1_start,
        idi &queue1_size, // The number of element in queue1, independent with queue1_start.
        const idi queue1_length, // The maximum capacity of queue1, independent with queue1_start.
        std::vector<Candidate> &queue2,
        const idi queue2_start,
        const idi queue2_size)
//        const idi limit_size)
{
    assert(queue1_size && queue2_size);
    // Record the lowest insert location: where queue2's best element lands.
    auto it_loc = std::lower_bound(
            queue1.begin() + queue1_start,
            queue1.begin() + queue1_start + queue1_size,
            queue2[queue2_start]);
    idi insert_index = it_loc - (queue1.begin() + queue1_start);
    if (insert_index == queue1_size) {
        // All of queue2 ranks after queue1: bulk-append as much of queue2 as
        // the remaining capacity allows.
        idi copy_count = (queue1_size + queue2_size > queue1_length) ?
                            queue1_length - queue1_size :
                            queue2_size;
        memmove(queue1.data() + queue1_start + queue1_size,
                queue2.data() + queue2_start,
                copy_count * sizeof(Candidate));
        queue1_size += copy_count;
        return;
    }
    if (queue2[queue2_start].id_ != it_loc->id_) {
        // Not Duplicate
        add_into_queue_at(
                queue2[queue2_start],
                queue1,
                insert_index,
                queue1_start,
                queue1_size,
                queue1_length);
    }
    if (queue2_size == 1) {
        return;
    }
    // Merge the remaining elements with a two-pointer walk.
    idi q_i_1 = insert_index + 1 + queue1_start;
    idi q_i_2 = queue2_start + 1;
    idi q_i_1_bound = queue1_start + queue1_size; // When queue1_size is updated, so should be q_i_1_bound.
    const idi q_i_2_bound = queue2_start + queue2_size;
//    idi insert_i;
    for (idi insert_i = insert_index + 1; insert_i < queue1_length; ++insert_i) {
        if (q_i_1 >= q_i_1_bound) {
            // queue1 exhausted: append the rest of queue2 up to capacity.
            queue1_size += std::min(queue1_length - insert_i, q_i_2_bound - q_i_2);
            for ( ; insert_i < queue1_size; ++insert_i) {
                queue1[queue1_start + insert_i] = queue2[q_i_2++];
            }
            break;
        } else if (q_i_2 >= q_i_2_bound) {
            // queue2 exhausted: done.
            break;
        } else if (queue1[q_i_1] < queue2[q_i_2]) {
            ++q_i_1;
        } else if (queue2[q_i_2] < queue1[q_i_1]) {
            add_into_queue_at(
                    queue2[q_i_2++],
                    queue1,
                    insert_i,
                    queue1_start,
                    queue1_size,
                    queue1_length);
            ++q_i_1;
            // queue1 may have grown; refresh the bound.
            q_i_1_bound = queue1_start + queue1_size;
        } else {
            // Duplicate (neither orders before the other): skip both.
            ++q_i_2;
            ++q_i_1;
        }
    }
}
// Merge all per-thread candidate queues (held as separate vectors) into the
// global top-L list set_L. A parallel binary-tree reduction handles the
// largest power-of-two prefix of the queues; any remainder is folded in
// sequentially, prefix-sum style. Returns the lowest index of set_L that was
// changed (L if none), and resets every local queue end to 0.
inline idi Searching::merge_all_queues_para_list(
        std::vector< std::vector<Candidate> > &local_queues_list,
        std::vector<idi> &local_queues_ends,
        std::vector<Candidate> &set_L,
        const idi L)
{
    // Merge sorted queue bi into sorted queue ai, truncating the result to L.
    auto merge_pair = [&](idi ai, idi bi) {
        if (0 == local_queues_ends[bi]) {
            return;
        }
        if (local_queues_ends[ai] == 0) {
            // ai is empty: steal bi wholesale instead of merging.
            local_queues_list[ai].swap(local_queues_list[bi]);
            std::swap(local_queues_ends[ai], local_queues_ends[bi]);
            return;
        }
        idi merged_length = local_queues_ends[ai] + local_queues_ends[bi];
        std::vector<Candidate> merged(merged_length);
        std::merge(
                local_queues_list[ai].begin(),
                local_queues_list[ai].begin() + local_queues_ends[ai],
                local_queues_list[bi].begin(),
                local_queues_list[bi].begin() + local_queues_ends[bi],
                merged.begin());
        if (merged_length > L) {
            // Keep only the best L candidates.
            merged.resize(L);
            merged_length = L;
        } else if (merged_length < L) {
            // Pad the storage so every queue stays at least L long.
            merged.resize(L);
        }
        local_queues_list[ai].swap(merged);
        local_queues_ends[ai] = merged_length;
    };
    // Tree reduction over the power-of-two prefix of the queues.
    int size = 1 << (static_cast<idi>(log2(num_threads_)));
    idi log2size = static_cast<idi>(log2(size));
    for (idi d = 0; d < log2size; ++d) {
        uint32_t by = 1 << (d + 1);
#pragma omp parallel for
        for (int i = 0; i < size; i += by) {
            // Merge queue (i + 2^d - 1) into queue (i + 2^(d+1) - 1).
            merge_pair(i + (1 << (d + 1)) - 1, i + (1 << d) - 1);
        }
    }
    // Queues beyond the power-of-two prefix: fold left-to-right.
    if (size != num_threads_) {
        for (int i = size; i < num_threads_; ++i) {
            merge_pair(i, i - 1);
        }
    }
    // Fold the final accumulated queue into the fixed-size global list set_L.
    idi r = L;
    if (local_queues_ends[num_threads_ - 1]) {
        r = merge_two_queues_into_1st_queue_seq_fixed(
                set_L,
                0,
                L,
                local_queues_list[num_threads_ - 1],
                0,
                local_queues_ends[num_threads_ - 1]);
    }
    // Reset local_queues_ends for the next round.
    std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
    return r;
}
/* Function:
 * Use large local_queues_array as a concatenation of all queues:
 * queue q occupies set_L[q * local_queue_length ...], and queue
 * num_threads_-1 is the fixed-size global top-L queue. A parallel tree
 * reduction merges the power-of-two prefix; the remainder is folded in
 * sequentially. Returns the lowest changed index of the global queue
 * (L if unchanged), and resets all local queue ends except the global one.
 */
inline idi Searching::merge_all_queues_para_array(
        std::vector<Candidate> &set_L,
        std::vector<idi> &local_queues_ends,
        const idi local_queue_length,
        const idi L)
{
    const int num_queues = num_threads_;
    idi nk = L;
    // Tree reduction over the power-of-two prefix of the queues.
    int size = 1 << (static_cast<idi>(log2(num_queues)));
    idi log2size = static_cast<idi>(log2(size));
    for (idi d = 0; d < log2size; ++d) {
        uint32_t by = 1 << (d + 1);
#pragma omp parallel for
        for (int i = 0; i < size; i += by) {
            idi ai = i + (1 << (d + 1)) - 1; // i + 2^(d+1) - 1
            idi a_start = ai * local_queue_length;
            idi bi = i + (1 << d) - 1; // i + 2^d - 1
            idi b_start = bi * local_queue_length;
            if (0 == local_queues_ends[bi]) {
                continue;
            }
            if (local_queues_ends[ai] == 0) {
                // ai is empty: copy bi's contents over instead of merging.
                std::copy(set_L.begin() + b_start,
                        set_L.begin() + b_start + local_queues_ends[bi],
                        set_L.begin() + a_start); // Copy bi to ai
                local_queues_ends[ai] = local_queues_ends[bi];
                local_queues_ends[bi] = 0;
                continue;
            }
            if (ai != static_cast<idi>(num_queues - 1)) {
                // Ordinary local queue: merge with growth up to capacity.
                merge_two_queues_into_1st_queue_seq_incr(
                        set_L,
                        a_start,
                        local_queues_ends[ai],
                        local_queue_length,
                        set_L,
                        b_start,
                        local_queues_ends[bi]);
            } else {
                // Global queue: fixed size L; track the lowest changed index.
                idi r = merge_two_queues_into_1st_queue_seq_fixed(
                        set_L,
                        a_start,
                        L,
                        set_L,
                        b_start,
                        local_queues_ends[bi]);
                if (r < nk) {
                    nk = r;
                }
            }
        }
    }
    // Remain, prefix-sum-like merge
    if (size != num_queues) {
        for (int i = size; i < num_queues; ++i) {
            idi ai = i;
            idi a_start = ai * local_queue_length;
            idi bi = i - 1;
            idi b_start = bi * local_queue_length;
            if (0 == local_queues_ends[bi]) {
                continue;
            }
            if (local_queues_ends[ai] == 0) {
                std::copy(set_L.begin() + b_start,
                        set_L.begin() + b_start + local_queues_ends[bi],
                        set_L.begin() + a_start); // Copy bi to ai
                local_queues_ends[ai] = local_queues_ends[bi];
                local_queues_ends[bi] = 0;
                continue;
            }
            if (ai != static_cast<idi>(num_queues - 1)) {
                merge_two_queues_into_1st_queue_seq_incr(
                        set_L,
                        a_start,
                        local_queues_ends[ai],
                        local_queue_length,
                        set_L,
                        b_start,
                        local_queues_ends[bi]);
            } else {
                idi r = merge_two_queues_into_1st_queue_seq_fixed(
                        set_L,
                        a_start,
                        L,
                        set_L,
                        b_start,
                        local_queues_ends[bi]);
                if (r < nk) {
                    nk = r;
                }
            }
        }
    }
    // Reset local_queues_ends (all but the last, i.e. the global queue).
    // Not do this for Collector Idea or Selecting Idea
    std::fill(local_queues_ends.begin(), local_queues_ends.end() - 1, 0);
//    std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
    return nk;
//    return r;
}
/* Function:
 * When merge all queues (in an array, and [num_threads_ - 1] is the global queue),
 * the starting local is at [queue_base]. Only queues [queue_base, queue_base +
 * real_threads) take part in the tree reduction; the remainder up to
 * num_threads_ is folded in sequentially. Returns the lowest changed index of
 * the global queue (L if unchanged) and resets all ends but the global one.
 */
inline idi Searching::merge_all_queues_queue_base(
//        std::vector< std::vector<Candidate> > &local_queues_list,
        std::vector<Candidate> &set_L,
//        std::vector<Candidate> &local_queues_array,
        std::vector<idi> &local_queues_ends,
        const idi queue_base,
        const int real_threads,
        const idi local_queue_length,
//        std::vector<Candidate> &set_L,
        const idi L)
{
    idi nk = L;
    // Tree reduction over the power-of-two prefix of the participating queues.
    int size = 1 << (static_cast<idi>(log2(real_threads)));
//    int size = 1 << (static_cast<idi>(log2(num_threads_)));
    idi log2size = static_cast<idi>(log2(size));
    for (idi d = 0; d < log2size; ++d) {
        idi by = 1 << (d + 1);
        idi i_bound = size + queue_base;
#pragma omp parallel for num_threads(real_threads)
        for (idi i = queue_base; i < i_bound; i += by) {
//        for (int i = 0; i < size; i += by) {
//            idi ai = i + (1 << (d + 1)) - 1 + queue_base; // i + 2^(d+1) - 1
            idi ai = i + (1 << (d + 1)) - 1; // i + 2^(d+1) - 1
            idi a_start = ai * local_queue_length;
//            idi bi = i + (1 << d) - 1 + queue_base; // i + 2^d - 1
            idi bi = i + (1 << d) - 1; // i + 2^d - 1
            idi b_start = bi * local_queue_length;
            if (0 == local_queues_ends[bi]) {
                continue;
            }
            if (local_queues_ends[ai] == 0) {
                // ai is empty: copy bi's contents over instead of merging.
//                local_queues_list[ai].swap(local_queues_list[bi]);
                std::copy(set_L.begin() + b_start,
                          set_L.begin() + b_start + local_queues_ends[bi],
                          set_L.begin() + a_start); // Copy bi to ai
                local_queues_ends[ai] = local_queues_ends[bi];
                local_queues_ends[bi] = 0;
                continue;
            }
            if (ai != static_cast<idi>(num_threads_ - 1)) {
                // Ordinary local queue: merge with growth up to capacity.
                merge_two_queues_into_1st_queue_seq_incr(
                        set_L,
                        a_start,
                        local_queues_ends[ai],
                        local_queue_length,
                        set_L,
                        b_start,
                        local_queues_ends[bi]);
            } else {
                // Global queue: fixed size L; track the lowest changed index.
                idi r = merge_two_queues_into_1st_queue_seq_fixed(
                        set_L,
                        a_start,
                        L,
                        set_L,
                        b_start,
                        local_queues_ends[bi]);
                if (r < nk) {
                    nk = r;
                }
            }
        }
    }
    // Remain, prefix-sum-like merge
    if (size != real_threads) {
//    if (size != num_threads_) {
        for (int i = size + queue_base; i < num_threads_; ++i) {
//        for (int i = size; i < num_threads_; ++i) {
            idi ai = i;
            idi a_start = ai * local_queue_length;
            idi bi = i - 1;
            idi b_start = bi * local_queue_length;
            if (0 == local_queues_ends[bi]) {
                continue;
            }
            if (local_queues_ends[ai] == 0) {
                std::copy(set_L.begin() + b_start,
                          set_L.begin() + b_start + local_queues_ends[bi],
                          set_L.begin() + a_start); // Copy bi to ai
                local_queues_ends[ai] = local_queues_ends[bi];
                local_queues_ends[bi] = 0;
                continue;
            }
            if (ai != static_cast<idi>(num_threads_ - 1)) {
                merge_two_queues_into_1st_queue_seq_incr(
                        set_L,
                        a_start,
                        local_queues_ends[ai],
                        local_queue_length,
                        set_L,
                        b_start,
                        local_queues_ends[bi]);
            } else {
                idi r = merge_two_queues_into_1st_queue_seq_fixed(
                        set_L,
                        a_start,
                        L,
                        set_L,
                        b_start,
                        local_queues_ends[bi]);
                if (r < nk) {
                    nk = r;
                }
            }
        }
    }
    // Reset local_queues_ends (all but the last, i.e. the global queue).
    std::fill(local_queues_ends.begin(), local_queues_ends.end() - 1, 0);
//    std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
    return nk;
//    return r;
}
/**
 * In-place merge of two CONSECUTIVE sorted runs inside two_queues:
 * [base_1, base_2) and [base_2, base_2 + length_2). Elements of the second
 * run are shifted into position with memmove; the combined region stays
 * sorted over its first (index_1-reachable) portion.
 * NOTE(review): in the duplicate branch the memmove shifts the run up and
 * then advances past BOTH elements without writing e_2 — this leaves two
 * copies of e_1 adjacent and drops e_2. The original author's comment admits
 * the duplicate policy is unresolved; confirm downstream consumers tolerate
 * repeated entries.
 */
inline void Searching::merge_two_consecutive_queues_in_place(
        std::vector<Candidate> &two_queues,
        const idi base_1,
//        const idi &end_1,
        const idi base_2,
        const idi &length_2)
{
//    idi tid = omp_get_thread_num();
    idi index_1 = base_1;
    idi index_2 = base_2;
    const idi bound_2 = base_2 + length_2;
    while (index_1 < index_2
           && index_2 < bound_2) {
        Candidate e_1 = two_queues[index_1];
        Candidate e_2 = two_queues[index_2];
        if (e_1 < e_2) {
            // First run's element is already in place.
            ++index_1;
        } else if (e_2 < e_1) {
            // Second run's element wins: shift [index_1, index_2) up one slot
            // and drop e_2 into the gap.
//            time_memmove_list_[tid] -= WallTimer::get_time_mark();
            std::memmove(two_queues.data() + index_1 + 1,
                         two_queues.data() + index_1,
                         (index_2 - index_1) * sizeof(Candidate));
//            time_memmove_list_[tid] += WallTimer::get_time_mark();
            two_queues[index_1] = e_2;
            ++index_1;
            ++index_2;
        } else { // Duplicate, but have no idea what to do right now
//            time_memmove_list_[tid] -= WallTimer::get_time_mark();
            std::memmove(two_queues.data() + index_1 + 1,
                         two_queues.data() + index_1,
                         (index_2 - index_1) * sizeof(Candidate));
//            time_memmove_list_[tid] += WallTimer::get_time_mark();
            index_1 += 2;
            ++index_2;
        }
    }
}
///* Function:
// * Merge all queues to the global queue, in a two-queue-merge way
// */
//inline idi Searching::merge_all_queues_all_together_in_sequential(
// std::vector<Candidate> &set_L,
// std::vector<idi> &local_queues_ends,
// const idi local_queue_length,
// const idi L)
//{
// const idi num_queues = num_threads_;
// const idi global_queue_base = (num_queues - 1) * local_queue_length;
// std::vector<idi> queue_heads(num_queues, 0);
// idi queue_id_min;
//
//// bool is_finished = false;
// bool is_1st_selected = true;
// idi nk = L; // The highest location of insertion.
// {
// for (idi q_i = 0; q_i < num_queues; ++q_i) {
// if (0 == local_queues_ends[q_i]) {
// continue;
// }
// _mm_prefetch(set_L.data() + q_i * local_queue_length, _MM_HINT_T0);
// }
// }
// while (queue_heads[num_queues - 1] < L) {
//// time_compare_minimum_ -= WallTimer::get_time_mark();
// queue_id_min = min_all_queues_at_heads(
// set_L,
// queue_heads,
// local_queues_ends,
// local_queue_length,
// L);
//// time_compare_minimum_ += WallTimer::get_time_mark();
// if (queue_id_min != num_queues - 1) { // Not in the global queue
//// time_insert_ -= WallTimer::get_time_mark();
// insert_one_element_at(
// set_L[queue_heads[queue_id_min] + queue_id_min * local_queue_length],
// set_L,
// queue_heads[num_queues - 1],
// global_queue_base,
// L);
//// time_insert_ += WallTimer::get_time_mark();
// if (is_1st_selected) { // Get the highest inserting location
// is_1st_selected = false;
// nk = queue_heads[num_queues - 1];
// }
// ++queue_heads[queue_id_min];
// }
// ++queue_heads[num_queues - 1];
// }
//
// // Reset local_queues_ends
// std::fill(local_queues_ends.begin(), local_queues_ends.end() - 1, 0);
// return nk;
//}
///* Function:
// * Find the minimum among queues at their head locations
// */
//inline idi Searching::min_all_queues_at_heads(
// const std::vector<Candidate> &set_L,
// std::vector<idi> &queue_heads,
// const std::vector<idi> &local_queues_ends,
// const idi local_queue_length,
// const idi L)
//{
// const idi num_queues = num_threads_;
// idi min_queue_id = num_queues - 1;
// Candidate min_candidate = set_L[queue_heads[min_queue_id] + min_queue_id * local_queue_length];
//
// for (idi q_i = 0; q_i < num_queues - 1; ++q_i) {
// if (queue_heads[q_i] >= local_queues_ends[q_i]) { // q_i finished
// continue;
// }
// const Candidate &ele = set_L[queue_heads[q_i] + q_i * local_queue_length];
// if (ele < min_candidate) {
// min_candidate = ele;
// min_queue_id = q_i;
// } else if (ele.id_ == min_candidate.id_) { // Redundant element
// ++queue_heads[q_i];
// }
// }
//
// return min_queue_id;
//}
/**
 * Merge all local queues held contiguously inside set_L (queue q occupying
 * set_L[q * local_queue_length ...]) down into queue 0, using pairwise
 * in-place merges. A tree pattern merges the power-of-two prefix of queues;
 * any remaining queues are then folded into queue 0 one by one.
 * @param set_L concatenated queues
 * @param set_L_length total number of valid elements across all queues
 * @param num_queues number of queues
 * @param local_queue_length capacity of each queue within set_L
 */
inline void Searching::merge_in_set_L(
        std::vector<Candidate> &set_L,
        const idi set_L_length,
        const idi num_queues,
        const idi local_queue_length)
{
    idi size = 1 << (static_cast<idi>(log2(num_queues)));
    idi log2size = static_cast<idi>(log2(size));
    for (idi d = 0; d < log2size; ++d) {
        // At depth d, each surviving queue has absorbed 2^d originals.
        const idi merge_length = (local_queue_length << d);
        idi by = 1 << (d + 1);
        // Parallel for
#pragma omp parallel for
        for (idi i = 0; i < size; i += by) {
//            idi a = i + (1 << d) - 1;
//            idi b = i + (1 << (d + 1)) - 1;
            idi a = i;
            idi b = i + (1 << d);
            idi base_a = a * local_queue_length;
            idi base_b = b * local_queue_length;
            if (base_a >= set_L_length || base_b >= set_L_length) {
                // Queue lies entirely past the valid region; nothing to merge.
                continue;
            }
            idi length_b;
            if (a + by < size) {
                length_b = merge_length;
            } else { // The last one
                // The final pair may hold fewer than merge_length elements.
                if (size == num_queues) {
                    length_b = set_L_length - base_b;
                } else {
                    length_b = merge_length;
                }
            }
//            printf("a: %u b: %u "
//                   "base_a: %u base_b: %u length_b: %u\n",
//                   a, b,
//                   base_a, base_b, length_b);
            merge_two_consecutive_queues_in_place(
                    set_L,
                    base_a,
                    base_b,
                    length_b);
        }
    }
    // Queues beyond the power-of-two prefix: fold each into queue 0.
    if (size != num_queues) {
        for (idi i = size; i < num_queues; ++i) {
            idi a = 0;
            idi b = i;
            idi base_a = a;
            idi base_b = b * local_queue_length;
            if (base_b >= set_L_length) {
                continue;
            }
            idi length_b;
            if (b != num_queues - 1) {
                length_b = local_queue_length;
            } else {
                // The last queue may be partially filled.
                length_b = set_L_length - base_b;
            }
//            printf("a: %u b: %u "
//                   "base_a: %u base_b: %u length_b: %u\n",
//                   a, b,
//                   base_a, base_b, length_b);
            merge_two_consecutive_queues_in_place(
                    set_L,
                    base_a,
                    base_b,
                    length_b);
        }
    }
}
/*
* 7/5/2020-20:27
* Every queue keeps only elements which can be ordered in the top-L globally.
* local_queues_lengths records the end location for all queues
*/
inline distf Searching::selecting_top_L_seq(
        std::vector<Candidate> &set_L,
        const idi global_L,
        const idi num_queues,
        const std::vector<idi> &local_queues_starts,
        std::vector<idi> &local_queues_sizes)
{
    // Sequential multi-way selection: repeatedly pick the globally smallest
    // head element among all queues until global_L elements have been taken.
    // On return, local_queues_sizes[q] is truncated to the number of elements
    // queue q contributed to the global top-L.
    // Returns the distance of the last (global_L-th) selected element, i.e.
    // the global top-L distance bound; FLT_MAX if nothing could be selected.
    std::vector<idi> pointers(num_queues, 0);
    // Initialized so that an early break (all queues exhausted before
    // global_L elements are found) returns a well-defined value instead of
    // reading an indeterminate one.
    distf bound_lth = FLT_MAX;
    idi rank = 0;
    bool is_finished = false;
    distf min_dist = FLT_MAX;
    // min_q_i/min_id are initialized to avoid undefined behavior: the
    // tie-break branch below reads min_id, and ++pointers[min_q_i] indexes
    // with min_q_i, before either is guaranteed to have been assigned.
    idi min_q_i = 0;
    idi min_id = 0;
    while (rank < global_L) {
        is_finished = true;
        min_dist = FLT_MAX;
        for (idi q_i = 0; q_i < num_queues; ++q_i) {
            if (pointers[q_i] >= local_queues_sizes[q_i]) {
                // q_i is finished
                continue;
            }
            is_finished = false;
            idi sub = pointers[q_i] + local_queues_starts[q_i];
            distf tmp_dist = set_L[sub].distance_;
            idi tmp_id = set_L[sub].id_;
            if (tmp_dist < min_dist) {
                min_dist = tmp_dist;
                min_id = tmp_id;
                min_q_i = q_i;
            } else if (tmp_dist == min_dist && tmp_id < min_id) {
                // Tie on distance: prefer the smaller vertex id for determinism.
                min_id = tmp_id;
                min_q_i = q_i;
            }
        }
        if (is_finished) {
            {//test
                printf("Error: selecting_top_L_seq: only found %u elements but global_L is %u.\n",
                        rank,
                        global_L);
            }
            break;
        }
        bound_lth = min_dist;
        ++pointers[min_q_i];
        ++rank;
    }
    // Truncate every queue to the prefix it contributed to the top-L.
    std::copy(pointers.begin(), pointers.end(), local_queues_sizes.begin());
    return bound_lth;
}
/*
* 7/24/2020-10:08
* Record for every queue the position that contains the top-M unchecked vertices.
* So the total expanded vertices should still be M, which means the computation should
* be the same with merging idea.
*/
/*
 * 7/24/2020-10:08
 * Record for every queue the position that contains the top-M unchecked vertices.
 * So the total expanded vertices should still be M, which means the computation should
 * be the same with merging idea.
 *
 * Repeatedly selects the globally smallest *unchecked* head element among all
 * queues, up to value_M times. local_m_counts[q] receives how many of the
 * selected top-M elements came from queue q. pointers_starts gives the start
 * offset inside each queue from which to scan. query_id and iter are kept for
 * debugging only and are not otherwise used.
 */
inline void Searching::selecting_unchecked_top_M_seq(
        const idi query_id,
        const idi iter,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &pointers_starts,
        const idi value_M,
        const idi num_queues,
        const std::vector<idi> &local_queues_starts,
        const std::vector<idi> &local_queues_sizes,
        std::vector<idi> &local_m_counts)
{
    std::vector<idi> pointers(pointers_starts);
    std::fill(local_m_counts.begin(), local_m_counts.end(), 0);
    idi rank = 0;
    bool is_finished = true;
    distf min_dist = FLT_MAX;
    // min_q_i/min_id are initialized to avoid undefined behavior: the
    // tie-break branch reads min_id, and pointers[min_q_i]/local_m_counts[min_q_i]
    // are indexed with min_q_i, before either is guaranteed assigned (e.g. if
    // every scanned distance equals FLT_MAX on the first pass).
    idi min_q_i = 0;
    idi min_id = 0;
    while (rank < value_M) {
        min_dist = FLT_MAX;
        for (idi q_i = 0; q_i < num_queues; ++q_i) {
            idi &pointer = pointers[q_i];
            idi sub = pointer + local_queues_starts[q_i];
            // Advance past already-checked elements at the head of queue q_i.
            while (pointer < local_queues_sizes[q_i]
                   && set_L[sub].is_checked_) {
                ++pointer;
                ++sub;
            }
            if (pointer >= local_queues_sizes[q_i]) {
                // q_i is finished
                continue;
            }
            is_finished = false;
            distf tmp_dist = set_L[sub].distance_;
            idi tmp_id = set_L[sub].id_;
            if (tmp_dist < min_dist) {
                min_dist = tmp_dist;
                min_id = tmp_id;
                min_q_i = q_i;
            } else if (tmp_dist == min_dist && tmp_id < min_id) {
                // Tie on distance: prefer the smaller vertex id for determinism.
                min_id = tmp_id;
                min_q_i = q_i;
            }
        }
        if (!is_finished) {
            is_finished = true;
            ++pointers[min_q_i];       // consume the selected element
            ++rank;
            ++local_m_counts[min_q_i]; // credit its queue
        } else {
            break; // no unchecked element left in any queue
        }
    }
}
/*
 * Sequential top-M best-first search on the NSG graph for one query.
 * Starting from init_ids, it repeatedly expands the (up to) M best unchecked
 * candidates in set_L, inserts promising neighbors back into set_L (kept
 * sorted, length L), and stops when every candidate in the top-L is checked.
 * The ids of the K best candidates are written to set_K.
 */
inline void Searching::search_with_top_m(
        const PANNS::idi M,
        const PANNS::idi query_id,
        const PANNS::idi K,
        const PANNS::idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K)
{
    // Mark the initial candidates as visited.
    boost::dynamic_bitset<> is_visited(num_v_);
    for (idi i = 0; i < L; ++i) {
        is_visited[init_ids[i]] = true;
    }

    const dataf *query_data = queries_load_ + query_id * dimension_;
    // Warm the cache for every initial vertex before computing distances.
    for (idi i = 0; i < L; ++i) {
        _mm_prefetch(opt_nsg_graph_ + init_ids[i] * vertex_bytes_, _MM_HINT_T0);
    }
    // Fill set_L with the initial candidates and their distances.
    for (idi i = 0; i < L; ++i) {
        idi v_id = init_ids[i];
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++; // the vertex record starts with its norm
        ++count_distance_computation_;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i] = Candidate(v_id, dist, false); // false: not checked yet
    }
    std::sort(set_L.begin(), set_L.begin() + L);

    std::vector<idi> selected(M);     // ids of candidates chosen this round
    idi selected_end = 0;
    idi k = 0;                        // position of the first unchecked candidate
    idi iteration = 0;                // debug counter
    while (k < L) {
        ++iteration;
        unsigned nk = L;              // highest queue position any insertion landed at
        idi last_k = L;               // position of the lowest candidate selected

        // Select up to M unchecked candidates, scanning from position k.
        for (idi c_i = k; c_i < L && selected_end < M; ++c_i) {
            if (set_L[c_i].is_checked_) {
                continue;
            }
            last_k = c_i; // remember the last (lowest-ranked) selection
            set_L[c_i].is_checked_ = true;
            selected[selected_end++] = set_L[c_i].id_;
        }

        // Expand the neighborhood of every selected candidate.
        for (idi s_i = 0; s_i < selected_end; ++s_i) {
            idi cand_id = selected[s_i];
            _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
            // Adjacency list layout: [out_degree][neighbor ids...]
            idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
            idi out_degree = *out_edges++;
            for (idi n_i = 0; n_i < out_degree; ++n_i) {
                _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
            }
            for (idi e_i = 0; e_i < out_degree; ++e_i) {
                idi nb_id = out_edges[e_i];
                if (is_visited[nb_id]) {
                    continue;
                }
                is_visited[nb_id] = true;
                auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                dataf norm = *nb_data++;
                ++count_distance_computation_;
                distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                if (dist > set_L[L - 1].distance_) {
                    continue; // worse than the current L-th best: cannot enter the queue
                }
                Candidate cand(nb_id, dist, false);
                idi r = insert_into_queue(set_L, L, cand);
                if (r < nk) {
                    nk = r;
                }
            }
        }
        selected_end = 0; // clear the selection for the next round
        // Resume from the highest position an insertion reached, or just past
        // the last selected candidate if nothing landed above it.
        k = (nk <= last_k) ? nk : (last_k + 1);
    }
    // Export the ids of the K best candidates.
    for (idi k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i].id_;
    }
}
/*
 * Top-M best-first search where the expansion width M starts at 1 and doubles
 * every iteration until it reaches value_M_max. Compared with
 * search_with_top_m, the caller supplies the scratch buffers
 * (top_m_candidates and is_visited) so they can be reused across queries;
 * is_visited is reset before returning.
 */
inline void Searching::search_with_top_m_scale_m(
        const PANNS::idi value_M_max,
        const PANNS::idi query_id,
        const PANNS::idi K,
        const PANNS::idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K,
        std::vector<idi> &top_m_candidates,
        boost::dynamic_bitset<> &is_visited)
{
//    boost::dynamic_bitset<> is_visited(num_v_);

    // Mark the initial candidates as visited.
    {
        for (idi c_i = 0; c_i < L; ++c_i) {
            is_visited[init_ids[c_i]] = true;
        }
    }

    const dataf *query_data = queries_load_ + query_id * dimension_;
    // Warm the cache for every initial vertex before computing distances.
    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    // Get the distances of all candidates, store in the set set_L.
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++; // the vertex record starts with its norm
        ++count_distance_computation_;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    }
    std::sort(set_L.begin(), set_L.begin() + L);

//    std::vector<idi> top_m_candidates(M);
    idi top_m_candidates_end = 0;
    idi k = 0; // Index of first unchecked candidate.
    idi tmp_count = 0; // for debug
    idi M = 1; // current expansion width; doubles per iteration up to value_M_max
    while (k < L) {
        ++tmp_count;

        unsigned nk = L; // highest queue position any insertion landed at

        // Select M candidates
        idi last_k = L; // position of the lowest candidate selected this round
        for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
            if (set_L[c_i].is_checked_) {
                continue;
            }
            last_k = c_i; // Record the location of the last candidate selected.
            set_L[c_i].is_checked_ = true;
            top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
        }

        // Push M candidates' neighbors into the queue.
        for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
            idi cand_id = top_m_candidates[c_i];
            _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
            // Adjacency list layout: [out_degree][neighbor ids...]
            idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
            idi out_degree = *out_edges++;
            for (idi n_i = 0; n_i < out_degree; ++n_i) {
                _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
            }
            for (idi e_i = 0; e_i < out_degree; ++e_i) {
                idi nb_id = out_edges[e_i];
                if (is_visited[nb_id]) {
                    continue;
                }
                is_visited[nb_id] = true;
                auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                dataf norm = *nb_data++;
                ++count_distance_computation_;
                distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                if (dist > set_L[L-1].distance_) {
                    continue; // worse than the current L-th best: cannot enter the queue
                }
                Candidate cand(nb_id, dist, false);
                idi r = insert_into_queue(set_L, L, cand);
                if (r < nk) {
                    nk = r;
                }
            }
        }
        top_m_candidates_end = 0; // Clear top_m_candidates

        // Resume from the highest position an insertion reached, or just past
        // the last selected candidate if nothing landed above it.
        if (nk <= last_k) {
            k = nk;
        } else {
            k = last_k + 1;
        }
        // Double the expansion width until it reaches value_M_max.
        if (M < value_M_max) {
            M <<= 1;
        }
    }

    // Export the ids of the K best candidates.
    for (idi k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i].id_;
    }
    {// Reset
        is_visited.reset();
    }
}
////void Searching::search_with_top_m(
//inline void Searching::search_with_top_m_to_get_distance_range(
// const PANNS::idi M,
// const PANNS::idi query_id,
//// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids)
//// std::vector<idi> &set_K)
//{
// dist_max_ = -FLT_MAX;
// dist_min_ = FLT_MAX;
// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = true;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
//// {// For distance range
//// if (dist > dist_max_) {
//// dist_max_ = dist;
//// }
//// if (dist < dist_min_) {
//// dist_min_ = dist;
//// }
//// }
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//
// unsigned nk = L;
//
// // Select M candidates
// idi last_k = L;
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = true;
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// idi r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
//// {// For distance range
//// if (dist > dist_max_) {
//// dist_max_ = dist;
//// }
//// if (dist < dist_min_) {
//// dist_min_ = dist;
//// }
//// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// {// For histogram
// for (idi i_l = 0; i_l < L; ++i_l) {
// distf dist = set_L[i_l].distance_;
// {// For distance range
// if (dist > dist_max_) {
// dist_max_ = dist;
// }
// if (dist < dist_min_) {
// dist_min_ = dist;
// }
// }
// }
// }
// }
//
//// for (idi k_i = 0; k_i < K; ++k_i) {
//// set_K[k_i] = set_L[k_i].id_;
//// }
//}
//
////void Searching::search_with_top_m(
//inline void Searching::search_with_top_m_myths_M(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K)
//{
//// {//test
//// printf("query_id: %u\n", query_id);
//// }
// const idi loc_range = L / 3;
//
//
// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = true;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
//// {// For histogram
//// const distf dist_range = dist_max_ - dist_min_;
//// printf("iter:%u\n", 0);
//// for (idi i_l = 0; i_l < L; ++i_l) {
//// printf("%f\n", (set_L[i_l].distance_ - dist_min_) / dist_range * 100.0);
//// }
//// }
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// std::vector<idi> range_count(3, 0);
// idi zero_inserted_count = 0;
//// {//test
//// printf("tmp_count: %u\n", tmp_count);
//// }
// ++tmp_count;
//
// unsigned nk = L;
//
// // Select M candidates
// idi last_k = L;
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//// {//test
//// printf("top_m_candidates_ends: %u\n", top_m_candidates_end);
//// }
// {
// if (0 == top_m_candidates_end) {
// break;
// }
// }
//
//
// uint64_t count_neighbors = 0;
// uint64_t count_inserted = 0;
// std::vector<idi> locs_to_count(M);
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
//
// count_neighbors += out_degree;
// idi num_inserted = 0;
//
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = true;
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// ++num_inserted;
// Candidate cand(nb_id, dist, false);
// idi r = insert_into_queue(set_L, L, cand);
//// {
//// printf("c_i: %u "
//// "count: %u "
//// "loc_inserted: %u\n",
//// c_i,
//// num_inserted,
//// r);
//// }
// if (r < nk) {
// nk = r;
// }
// {
// ++range_count[r / loc_range];
// }
// }
// {
// if (0 == num_inserted) {
// ++zero_inserted_count;
// }
// locs_to_count[c_i] = num_inserted;
// count_inserted += num_inserted;
// }
//// {
//// printf("c_i: %u "
//// "num_inserted: %u\n",
//// c_i,
//// num_inserted);
//// }
// }
//// {
//// for (idi c_i = top_m_candidates_end; c_i < M; ++c_i) {
//// locs_to_count[c_i] = 0;
//// }
//// printf("iter:%u\n", tmp_count);
//// for (idi c_i = 0; c_i < M; ++c_i) {
//// printf("%u %u\n", c_i, locs_to_count[c_i]);
//// }
//// }
//// {//test
//// idi sum = 0;
//// for (const idi ct : range_count) sum += ct;
//// printf("tmp_count: %u "
//// "k: %u "
//// "actual_M: %u %.1f%% "
//// "zero_ins: %u %.1f%% "
//// "1/3: %u %.1f%% "
//// "2/3: %u %.1f%% "
//// "3/3: %u %.1f%%\n",
//// tmp_count,
//// k,
//// top_m_candidates_end, 100.0 * top_m_candidates_end / M,
//// zero_inserted_count, 100.0 * zero_inserted_count / top_m_candidates_end,
//// range_count[0], 100.0 * range_count[0] / sum,
//// range_count[1], 100.0 * range_count[1] / sum,
//// range_count[2], 100.0 * range_count[2] / sum);
//// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// {
// printf("query:%uiter: %u "
// "#neighbors: %lu "
// "#inserted: %lu "
// "ratio: %.2f%%\n",
// query_id, tmp_count,
// count_neighbors,
// count_inserted,
// 100.0 * count_inserted / count_neighbors);
// }
//// {// For histogram
////// const auto it_min = std::min_element(set_L.begin(), set_L.end());
////// const auto it_max = std::max_element(set_L.begin(), set_L.end());
////// const distf dist_min = it_min->distance_;
////// const distf dist_max = it_max->distance_;
////// const distf dist_min = it_min->distance_ - 1.0;
////// const distf dist_max = it_max->distance_ + 1.0;
//// const distf dist_range = dist_max_ - dist_min_;
////// const distf dist_range = dist_max - dist_min;
////// {
////// printf("it_min->distance_: %f dist_min: %f\n",
////// it_min->distance_, dist_min);
////// }
////// const distf dist_range = it_max->distance_ - it_min->distance_;
//// printf("iter:%u\n", tmp_count);
//// for (idi i_l = 0; i_l < L; ++i_l) {
////// printf("%f\n", set_L[i_l].distance_);
////// printf("%f\n", (set_L[i_l].distance_ - dist_min) / dist_range * 100.0);
//// printf("%f\n", (set_L[i_l].distance_ - dist_min_) / dist_range * 100.0);
////// printf("%.2f\n", (set_L[i_l].distance_ - it_min->distance_) / dist_range * 100.0);
//// }
//// }
// }
//
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
// if (query_id == 3) {
// exit(1);
// }
//}
//
//// Sequential Top-M algorithm for profiling purpose: byte array, CAS, and OpenMP
////void Searching::search_with_top_m(
//inline void Searching::search_with_top_m_profile_bit_CAS(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K)
//{
//// std::vector<uint8_t> is_visited(num_v_, 0); // Byte array
//// boost::dynamic_bitset<> is_visited(num_v_); // Bit array
// BitVector is_visited(num_v_);
//
// {
//#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
//// is_visited[init_ids[c_i]] = true;
// is_visited.atomic_set_bit(init_ids[c_i]);
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//
// unsigned nk = L;
//
// // Select M candidates
// idi last_k = L;
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
//#pragma omp parallel for
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
//// if (is_visited[nb_id]) {
//// continue;
//// }
//// is_visited[nb_id] = true;
//
//// if (!AtomicOps::CAS(is_visited.data() + nb_id,
//// static_cast<uint8_t>(0),
//// static_cast<uint8_t>(1))) {
//// continue;
//// }
// {// Self-defined BitVector
// if (is_visited.atomic_is_bit_set(nb_id)) {
// continue;
// }
// is_visited.atomic_set_bit(nb_id);
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// idi r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
//#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
////
//// {//test
//// for (idi k_i = 0; k_i < K; ++k_i) {
//// printf("%u: %u: %u %f\n",
//// query_id,
//// k_i, set_L[k_i].id_, set_L[k_i].distance_);
//// }
//// exit(1);
//// }
//}
///// Backup
//inline void Searching::search_with_top_m(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K)
//// std::vector< std::vector<idi> > &top_m_list)
//{
// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = true;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//
// unsigned nk = L;
//
// // Select M candidates
// idi last_k = L;
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = true;
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// idi r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
////
//// {//test
//// for (idi k_i = 0; k_i < K; ++k_i) {
//// printf("%u: %u: %u %f\n",
//// query_id,
//// k_i, set_L[k_i].id_, set_L[k_i].distance_);
//// }
//// exit(1);
//// }
//}
//
////// DEPRECATED: the is_visited array cannot be shared among threads.
//inline void Searching::search_with_top_m_no_local_arrays(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// boost::dynamic_bitset<> &is_visited)
//// std::vector< std::vector<idi> > &top_m_list)
//{
//// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = true;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//
// unsigned nk = L;
//
// // Select M candidates
// idi last_k = L;
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = true;
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// idi r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
////
//// {//test
//// for (idi k_i = 0; k_i < K; ++k_i) {
//// printf("%u: %u: %u %f\n",
//// query_id,
//// k_i, set_L[k_i].id_, set_L[k_i].distance_);
//// }
//// exit(1);
//// }
//}
// Batched top-M greedy best-first search over the optimized NSG graph.
// Processes batch_size queries together so that a candidate selected by
// several queries in the same iteration has its adjacency list read only
// once (via a shared "joint queue"), amortizing the graph access.
//
// M:            max number of unchecked candidates expanded per query per iteration.
// batch_start:  global ID of the first query in this batch.
// batch_size:   number of queries handled by this call.
// K:            number of result IDs written per query (K <= L expected).
// L:            capacity of each query's candidate queue.
// set_L_list:   per-query candidate queues; each must hold at least L entries.
// init_ids:     L initial candidate vertex IDs, shared by all queries.
// set_K_list:   output; set_K_list[batch_start + q][0..K) receives result IDs.
//
// NOTE(review): assumes opt_nsg_graph_ stores, per vertex, [norm | data | degree | edges]
// with layout constants vertex_bytes_ / data_bytes_ — confirm against the builder.
inline void Searching::search_with_top_m_in_batch(
        const PANNS::idi M,
        const PANNS::idi batch_start,
        const PANNS::idi batch_size,
        const PANNS::idi K,
        const PANNS::idi L,
        std::vector< std::vector<Candidate> > &set_L_list,
        const std::vector<idi> &init_ids,
        std::vector< std::vector<idi> > &set_K_list)
{
    // One visited bitmap per query: visitation is per-query state even though
    // candidate expansion is shared.
    std::vector< boost::dynamic_bitset<> > is_visited_list(batch_size, boost::dynamic_bitset<> (num_v_));

    // Mark the shared initial candidates as visited for every query.
    {
//#pragma omp parallel for
        for (idi q_i = 0; q_i < batch_size; ++q_i) {
            auto &is_visited = is_visited_list[q_i];
            for (idi c_i = 0; c_i < L; ++c_i) {
                is_visited[init_ids[c_i]] = true;
            }
        }
    }
    // Initialize every query's queue with distances to the initial candidates,
    // sorted ascending by distance.
    {
//#pragma omp parallel for
        for (idi q_i = 0; q_i < batch_size; ++q_i) {
            const dataf *query_data = queries_load_ + (q_i + batch_start) * dimension_;
            for (idi i = 0; i < L; i++) {
                idi v_id = init_ids[i];
                auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
                dataf norm = *v_data++; // First value at a vertex is its precomputed norm.
//                ++count_distance_computation_;
                distf dist = compute_distance_with_norm(v_data, query_data, norm);
                set_L_list[q_i][i] = Candidate(v_id, dist, false); // False means not checked.
            }
            std::sort(set_L_list[q_i].begin(), set_L_list[q_i].begin() + L);
        }
    }
    {
        std::vector<idi> joint_queue(M * batch_size); // Joint queue for all shared top-M candidates
        idi joint_queue_end = 0;
        boost::dynamic_bitset<> is_in_joint_queue(num_v_); // Dedup flag for the joint queue.
        // cand_id -> local IDs of the queries that selected cand_id this iteration.
        std::unordered_map< idi, std::vector<idi> > cands_query_ids(batch_size * M);
        std::vector<idi> ks(batch_size, 0); // Indices of every queue's first unchecked candidate.
        std::vector<idi> nks(batch_size, L); // Indices of highest candidate inserted
        std::vector<idi> last_ks(batch_size, L); // Indices of lowest candidate unchecked
        std::vector<idi> queries_not_finished(batch_size);
        idi queries_not_finished_end = batch_size;
        for (idi q_i = 0; q_i < batch_size; ++q_i) {
            queries_not_finished[q_i] = q_i;
        }
        bool is_finished = false;
        idi counter_for_debug = 0;
        while (!is_finished) {
            ++counter_for_debug;
            // Phase 1: build the joint queue. Every unfinished query
            // contributes its (up to) M best unchecked candidates.
            for (idi q_i = 0; q_i < queries_not_finished_end; ++q_i) {
                idi q_local_id = queries_not_finished[q_i];
                auto &set_L = set_L_list[q_local_id];
                idi top_m_count = 0;
                for (idi c_i = ks[q_local_id]; c_i < L && top_m_count < M; ++c_i) {
                    if (set_L[c_i].is_checked_) {
                        continue;
                    }
                    set_L[c_i].is_checked_ = true;
                    last_ks[q_local_id] = c_i;
                    ++top_m_count;
                    idi cand_id = set_L[c_i].id_;
                    // Record which query selected cand_id. A single emplace
                    // replaces the previous find + emplace + 2x operator[]
                    // pattern, so each insertion hashes exactly once.
                    auto ins = cands_query_ids.emplace(cand_id, std::vector<idi>());
                    if (ins.second) {
                        ins.first->second.reserve(batch_size); // At most one entry per query.
                    }
                    ins.first->second.push_back(q_local_id);
                    // Add cand_id to the joint queue unless another query
                    // already queued it this iteration.
                    if (is_in_joint_queue[cand_id]) {
                        continue;
                    }
                    is_in_joint_queue[cand_id] = true;
                    joint_queue[joint_queue_end++] = cand_id;
                }
            }
            queries_not_finished_end = 0; // Clear queries_not_finished
            // Phase 2: expand every shared candidate once, pushing its
            // neighbors into the queue of each query that selected it.
            for (idi c_i = 0; c_i < joint_queue_end; ++c_i) {
                idi cand_id = joint_queue[c_i];
                is_in_joint_queue[cand_id] = false; // Reset is_in_joint_queue
                idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
                idi out_degree = *out_edges++; // Degree precedes the edge list.
                const auto &query_local_ids = cands_query_ids[cand_id];
                for (idi q_local_id : query_local_ids) {
                    const dataf *query_data = queries_load_ + (q_local_id + batch_start) * dimension_;
                    auto &is_visited = is_visited_list[q_local_id];
                    auto &set_L = set_L_list[q_local_id];
                    // Traverse cand_id's neighbors for this query.
                    for (idi e_i = 0; e_i < out_degree; ++e_i) {
                        idi nb_id = out_edges[e_i];
                        if (is_visited[nb_id]) {
                            continue;
                        }
                        is_visited[nb_id] = true;
                        auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                        dataf norm = *nb_data++;
//                        ++count_distance_computation_;
                        distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                        if (dist > set_L[L-1].distance_) {
                            continue; // Worse than the current L-th best; skip.
                        }
                        Candidate new_cand(nb_id, dist, false);
                        idi insert_loc = insert_into_queue(set_L, L, new_cand);
                        if (insert_loc < nks[q_local_id]) {
                            nks[q_local_id] = insert_loc; // Track highest insertion point.
                        }
                    }
                }
                cands_query_ids.erase(cand_id); // Done with this candidate's selector list.
            }
            joint_queue_end = 0; // Clear joint_queue
            // Phase 3: advance each query's cursor; a query is finished once
            // its first unchecked candidate falls outside the queue.
            for (idi q_local_id = 0; q_local_id < batch_size; ++q_local_id) {
                if (nks[q_local_id] <= last_ks[q_local_id]) {
                    ks[q_local_id] = nks[q_local_id]; // Re-examine from the new insertion.
                } else {
                    ks[q_local_id] = last_ks[q_local_id] + 1;
                }
                nks[q_local_id] = L;
                last_ks[q_local_id] = L;
                if (ks[q_local_id] < L) {
                    queries_not_finished[queries_not_finished_end++] = q_local_id;
                }
            }
            if (!queries_not_finished_end) {
                is_finished = true;
            }
        }
    }
    // Emit the K nearest IDs for every query in the batch.
    {
        for (idi q_i = 0; q_i < batch_size; ++q_i) {
            for (idi c_i = 0; c_i < K && c_i < L; ++c_i) {
                set_K_list[q_i + batch_start][c_i] = set_L_list[q_i][c_i].id_;
            }
        }
    }
}
//inline void Searching::para_search_with_top_m_critical_area(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K)
//// std::vector< std::vector<idi> > &top_m_list)
//{
// std::vector<uint8_t> is_visited(num_v_, 0);
//// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
////#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
////#pragma omp parallel for
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
// ++count_distance_computation_;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//
// unsigned nk = L;
//// int nk = L;
//
// // Select M candidates
// idi last_k = L;
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
// // OpenMP reduction(min : nk) has a problem if nk is unsigned. nk might end up with being MAX_UINT.
////#pragma omp parallel for
////#pragma omp parallel for reduction(min : nk)
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
//// if (is_visited[nb_id]) {
//// continue;
//// }
//// is_visited[nb_id] = 1;
//
// if (!AtomicOps::CAS(is_visited.data() + nb_id,
// static_cast<uint8_t>(0),
// static_cast<uint8_t>(1))) {
// continue;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// ++count_distance_computation_;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// idi r;
////#pragma omp critical
// {
// r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
////#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
//}
//
//inline void Searching::para_search_with_top_m_critical_area_no_omp(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K)
//// std::vector< std::vector<idi> > &top_m_list)
//{
// std::vector<uint8_t> is_visited(num_v_, 0);
//// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
////#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
////#pragma omp parallel for
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
// ++count_distance_computation_;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//
// unsigned nk = L;
//// int nk = L;
//
// // Select M candidates
// idi last_k = L;
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
// // OpenMP reduction(min : nk) has a problem if nk is unsigned. nk might end up with being MAX_UINT.
////#pragma omp parallel for
////#pragma omp parallel for reduction(min : nk)
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
//// if (is_visited[nb_id]) {
//// continue;
//// }
//// is_visited[nb_id] = 1;
//
// if (!AtomicOps::CAS(is_visited.data() + nb_id,
// static_cast<uint8_t>(0),
// static_cast<uint8_t>(1))) {
// continue;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// ++count_distance_computation_;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// idi r;
////#pragma omp critical
// {
// r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
////#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
//}
//
//inline void Searching::para_search_with_top_m_critical_area_yes_omp(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K)
//// std::vector< std::vector<idi> > &top_m_list)
//{
// std::vector<uint8_t> is_visited(num_v_, 0);
//// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
//#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
////#pragma omp parallel for
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
// ++count_distance_computation_;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//
// unsigned nk = L;
//// int nk = L;
//
// // Select M candidates
// idi last_k = L;
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
// // OpenMP reduction(min : nk) has a problem if nk is unsigned. nk might end up with being MAX_UINT.
////#pragma omp parallel for
////#pragma omp parallel for reduction(min : nk)
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
//// if (is_visited[nb_id]) {
//// continue;
//// }
//// is_visited[nb_id] = 1;
//
// if (!AtomicOps::CAS(is_visited.data() + nb_id,
// static_cast<uint8_t>(0),
// static_cast<uint8_t>(1))) {
// continue;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// ++count_distance_computation_;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// idi r;
////#pragma omp critical
// {
// r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
////#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
//}
//
//inline void Searching::para_search_with_top_m_visited_array(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// std::vector<uint8_t> &is_visited)
//// std::vector< std::vector<idi> > &top_m_list)
//{
//// uint64_t count_visited = 0;
//
//// std::vector<uint8_t> is_visited(num_v_, 0);
//// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
////#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
//// ++count_visited;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
////#pragma omp parallel for
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
// ++count_distance_computation_;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//
// unsigned nk = L;
//// int nk = L;
//
// // Select M candidates
// idi last_k = L;
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
// // OpenMP reduction(min : nk) has a problem if nk is unsigned. nk might end up with being MAX_UINT.
////#pragma omp parallel for
////#pragma omp parallel for reduction(min : nk)
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
//// if (is_visited[nb_id]) {
//// continue;
//// }
//// is_visited[nb_id] = 1;
//
// if (!AtomicOps::CAS(is_visited.data() + nb_id,
// static_cast<uint8_t>(0),
// static_cast<uint8_t>(1))) {
// continue;
// }
//// ++count_visited;
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// ++count_distance_computation_;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// idi r;
////#pragma omp critical
// {
// r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
////#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
//
//// {
//// printf("query_id: %u "
//// "count_visited: %lu %f%%\n",
//// query_id,
//// count_visited,
//// 100.0 * count_visited / num_v_);
//// }
//}
//
//inline void Searching::para_search_with_top_m_merge_queues(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K)
//{
//// {//test
//// printf("query_id: %u\n", query_id);
//// }
//// const idi local_queue_length = ((M - 1) / num_threads_ + 1) * width_;
// const idi local_queue_length = L;
// std::vector< std::vector<Candidate> > local_queues_list(num_threads_, std::vector<Candidate>(local_queue_length));
// std::vector<idi> local_queues_ends(num_threads_, 0);
// std::vector<uint8_t> is_visited(num_v_, 0);
//// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
//#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//// {//test
//// printf("tmp_count: %d\n", tmp_count);
//// }
//
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
//#pragma omp parallel for
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// int tid = omp_get_thread_num();
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
//// if (is_visited[nb_id]) {
//// continue;
//// }
//// is_visited[nb_id] = 1;
//
// if (!AtomicOps::CAS(is_visited.data() + nb_id,
// static_cast<uint8_t>(0),
// static_cast<uint8_t>(1))) {
// continue;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// // Add to the local queue.
// add_into_queue(local_queues_list[tid], local_queues_ends[tid], local_queue_length, cand);
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// idi nk = L;
//// // Merge. Parallel merging in every two queues.
//// {
//// for (int tid = 0; tid < num_threads_; ++tid) {
//// if (0 == local_queues_ends[tid]) continue;
//// idi r = merge_two_queues_into_1st_queue_para(
//// set_L,
//// 0,
//// L,
//// local_queues_list[tid],
//// 0,
//// local_queues_ends[tid]);
////// idi r = merge_two_queues_into_1st_queue_seq(
////// set_L,
////// 0,
////// L,
////// local_queues_list[tid],
////// 0,
////// local_queues_ends[tid]);
//// local_queues_ends[tid] = 0; // Reset the local queue
//// if (r < nk) {
//// nk = r;
//// }
//// }
//// }
//// {// text
//// if (query_id == 4 &&
//// tmp_count == 5) {
//// // Print local queues
//// for (int t_i = 0; t_i < num_threads_; ++t_i) {
////// idi start_i = t_i * local_queue_length;
//// for (idi q_i = 0; q_i < local_queues_ends[t_i]; ++q_i) {
//// printf("t[%u][%u]: "
//// "id: %u "
//// "dist: %f\n",
//// t_i, q_i,
//// local_queues_list[t_i][q_i].id_,
//// local_queues_list[t_i][q_i].distance_);
//// }
//// }
//// printf("----------\n");
//// for (idi i = 0; i < L; ++i) {
//// printf("set_L[%u]: "
//// "id: %u "
//// "dist: %f\n",
//// i,
//// set_L[i].id_,
//// set_L[i].distance_);
//// }
//// printf("----------\n");
//// }
//// }
// // Merge. Merge all queues in parallel.
// {
// if (num_threads_ > 1) {
// idi r = merge_all_queues_para_list(
// local_queues_list,
// local_queues_ends,
// set_L,
// L);
// if (r < nk) {
// nk = r;
// }
// } else {
// if (local_queues_ends[0]) {
// idi r = merge_two_queues_into_1st_queue_seq_fixed(
// set_L,
// 0,
// L,
// local_queues_list[0],
// 0,
// local_queues_ends[0]);
// local_queues_ends[0] = 0;
// if (r < nk) {
// nk = r;
// }
// }
// }
// }
//// {//test
//// if (query_id == 4) {
//// for (idi i = 0; i < L; ++i) {
//// printf("tmp_count: %u "
//// "set_L[%u]: "
//// "id: %u "
//// "dist: %f\n",
//// tmp_count,
//// i,
//// set_L[i].id_,
//// set_L[i].distance_);
//// }
//// }
////
//// }
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
//#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
//// {
//// exit(1);
//// }
//// {//test
////
////// if (query_id == 4) {
//// for (idi i = 0; i < L; ++i) {
//// printf("set_L[%u]: "
//// "id: %u "
//// "dist: %f\n",
//// i,
//// set_L[i].id_,
//// set_L[i].distance_);
//// }
////// exit(1);
////// }
//// }
//}
//
////// Using local queue and then sequential merge.
//inline void Searching::para_search_with_top_m_queues_seq_merge(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K)
//// std::vector< std::vector<idi> > &top_m_list)
//{
//// const idi local_queue_length = ((L - 1) / num_threads_ + 1) * width_;
// const idi local_queue_length = L;
// std::vector< std::vector<Candidate> > local_queues_list(num_threads_, std::vector<Candidate>(local_queue_length));
// std::vector<idi> local_queues_ends(num_threads_, 0);
// std::vector<uint8_t> is_visited(num_v_, 0);
//// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
//#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//// for (idi v_i = 0; v_i < L; ++v_i) {
//// idi v_id = init_ids[v_i];
//// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
//// }
// // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//// {
//// printf("tmp_count: %u "
//// "k: %u\n",
//// tmp_count,
//// k);
//// }
//
//// unsigned nk = L;
//// int nk = L;
//
// // Select M candidates
// idi last_k = L;
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
//#pragma omp parallel for
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// int tid = omp_get_thread_num();
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
//// if (is_visited[nb_id]) {
//// continue;
//// }
//// is_visited[nb_id] = 1;
//
// if (!AtomicOps::CAS(is_visited.data() + nb_id,
// static_cast<uint8_t>(0),
// static_cast<uint8_t>(1))) {
// continue;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
//// idi r;
////#pragma omp critical
//// {
//// r = insert_into_queue(set_L, L, cand);
//// if (r < nk) {
//// nk = r;
//// }
//// }
// // Add to the local queue.
// add_into_queue(local_queues_list[tid], local_queues_ends[tid], local_queue_length, cand);
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// idi nk = L;
// // Merge
// {
// for (int tid = 0; tid < num_threads_; ++tid) {
// if (0 == local_queues_ends[tid]) continue;
// idi r = merge_two_queues_into_1st_queue_seq_fixed(
// set_L,
// 0,
// L,
// local_queues_list[tid],
// 0,
// local_queues_ends[tid]);
//// L + 1);
// local_queues_ends[tid] = 0; // Reset the local queue
// if (r < nk) {
// nk = r;
// }
// }
// }
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
//#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
////
//// {//test
//// for (idi k_i = 0; k_i < K; ++k_i) {
//// printf("%u: %u: %u %f\n",
//// query_id,
//// k_i, set_L[k_i].id_, set_L[k_i].distance_);
//// }
//// exit(1);
//// }
//}
//
//inline void Searching::para_search_with_top_m_merge_queues_no_CAS(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length,
// std::vector< std::vector<Candidate> > &local_queues_list,
// std::vector<idi> &local_queues_ends,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited)
//{
////// const idi local_queue_length = ((M - 1) / num_threads_ + 1) * width_;
//// const idi local_queue_length = L;
//// std::vector< std::vector<Candidate> > local_queues_list(num_threads_, std::vector<Candidate>(local_queue_length));
//// std::vector<idi> local_queues_ends(num_threads_, 0);
////// std::vector<uint8_t> is_visited(num_v_, 0);
//// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
//#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
//#pragma omp parallel for
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// int tid = omp_get_thread_num();
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
//
//// if (!AtomicOps::CAS(is_visited.data() + nb_id,
//// static_cast<uint8_t>(0),
//// static_cast<uint8_t>(1))) {
//// continue;
//// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// // Add to the local queue.
// add_into_queue(local_queues_list[tid], local_queues_ends[tid], local_queue_length, cand);
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// idi nk = L;
//// // Merge. Parallel merging in every two queues.
//// {
//// for (int tid = 0; tid < num_threads_; ++tid) {
//// if (0 == local_queues_ends[tid]) continue;
//// idi r = merge_two_queues_into_1st_queue_para(
//// set_L,
//// 0,
//// L,
//// local_queues_list[tid],
//// 0,
//// local_queues_ends[tid]);
////// idi r = merge_two_queues_into_1st_queue_seq(
////// set_L,
////// 0,
////// L,
////// local_queues_list[tid],
////// 0,
////// local_queues_ends[tid]);
//// local_queues_ends[tid] = 0; // Reset the local queue
//// if (r < nk) {
//// nk = r;
//// }
//// }
//// }
//// // Merge. Merge all queues in parallel.
//// {
//// if (num_threads_ > 1) {
//// idi r = merge_all_queues_para(
//// local_queues_list,
//// local_queues_ends,
//// set_L,
//// L);
//// if (r < nk) {
//// nk = r;
//// }
//// } else {
//// if (local_queues_ends[0]) {
//// idi r = merge_two_queues_into_1st_queue_seq(
//// set_L,
//// 0,
//// L,
//// local_queues_list[0],
//// 0,
//// local_queues_ends[0]);
//// local_queues_ends[0] = 0;
//// if (r < nk) {
//// nk = r;
//// }
//// }
//// }
//// }
// // Merge
// {
// for (int tid = 0; tid < num_threads_; ++tid) {
// if (0 == local_queues_ends[tid]) continue;
// idi r = merge_two_queues_into_1st_queue_seq_fixed(
// set_L,
// 0,
// L,
// local_queues_list[tid],
// 0,
// local_queues_ends[tid]);
//// L + 1);
// local_queues_ends[tid] = 0; // Reset the local queue
// if (r < nk) {
// nk = r;
// }
// }
// }
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
//#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
//
// {// Reset
// is_visited.reset();
//// std::fill(is_visited.begin(), is_visited.end(), 0);
// std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
// }
//}
//inline void Searching::para_search_with_top_m_merge_queues_in_array(
//inline void Searching::para_search_with_top_m_merge_queues_new_threshold(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
//// std::vector< std::vector<Candidate> > &local_queues_list,
// std::vector<Candidate> &local_queues_array,
// std::vector<idi> &local_queues_ends, // Sizes of local queue
// BitVector &is_visited)
//// std::vector<uint8_t> &is_visited)
//// boost::dynamic_bitset<> &is_visited)
//{
// {
//#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
//// is_visited[init_ids[c_i]] = 1;
// is_visited.atomic_set_bit(init_ids[c_i]);
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// idi min_index = L - 1;
// distf min_1st = set_L[min_index].distance_;
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//// {//test
//// printf("tmp_count: %d\n", tmp_count);
//// }
//
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
//#pragma omp parallel for
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// int tid = omp_get_thread_num();
// const idi local_queue_start = tid * local_queue_length;
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
//// { // Sequential edition
//// if (is_visited[nb_id]) {
//// continue;
//// }
//// is_visited[nb_id] = 1;
//// }
//// { // __ATOMIC_SEQ_CST edition
//// if (!AtomicOps::CAS(is_visited.data() + nb_id,
//// static_cast<uint8_t>(0),
//// static_cast<uint8_t>(1))) {
//// continue;
//// }
//// }
//// {// Acquire and Release edition
//// if (__atomic_load_n(is_visited.data() + nb_id, __ATOMIC_ACQUIRE)) {
//// continue;
//// }
//// __atomic_store_n(is_visited.data() + nb_id, 1, __ATOMIC_RELEASE);
//// }
// {// Self-defined BitVector
// if (is_visited.atomic_is_bit_set(nb_id)) {
// continue;
// }
// is_visited.atomic_set_bit(nb_id);
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
//
// if (dist > min_1st) {
// continue;
// } else if (min_index > 0) {
// // Inserted, so min_1st needs update
// if (dist > set_L[min_index - 1].distance_) {
// min_1st = dist;
// if (min_index < L - 1) {
// ++min_index;
// }
// } else {
// min_1st = set_L[--min_index].distance_;
// }
//// min_1st = set_L[--min_index].distance_;
// }
//
//// if (dist > set_L[L-1].distance_) {
//// continue;
//// }
//
// Candidate cand(nb_id, dist, false);
// // Add to the local queue.
// add_into_queue(local_queues_array, local_queue_start, local_queues_ends[tid], local_queue_length, cand);
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// idi nk = L;
//// // Merge. Parallel merging in every two queues.
//// {
//// for (int tid = 0; tid < num_threads_; ++tid) {
//// if (0 == local_queues_ends[tid]) continue;
//// idi r = merge_two_queues_into_1st_queue_para(
//// set_L,
//// 0,
//// L,
//// local_queues_list[tid],
//// 0,
//// local_queues_ends[tid]);
////// idi r = merge_two_queues_into_1st_queue_seq(
////// set_L,
////// 0,
////// L,
////// local_queues_list[tid],
////// 0,
////// local_queues_ends[tid]);
//// local_queues_ends[tid] = 0; // Reset the local queue
//// if (r < nk) {
//// nk = r;
//// }
//// }
//// }
// // Merge. Merge all queues in parallel.
// {
// if (num_threads_ > 1) {
// idi r = merge_all_queues_para_array(
//// local_queues_list,
// local_queues_array,
// local_queues_ends,
// local_queue_length,
// set_L,
// L);
// if (r < nk) {
// nk = r;
// }
// } else {
// if (local_queues_ends[0]) {
// idi r = merge_two_queues_into_1st_queue_seq_fixed(
// set_L,
// 0,
// L,
//// local_queues_list[0],
// local_queues_array,
// 0,
// local_queues_ends[0]);
// local_queues_ends[0] = 0;
// if (r < nk) {
// nk = r;
// }
// }
// }
// }
//// // Merge Sequentially
//// {
//// for (int tid = 0; tid < num_threads_; ++tid) {
//// if (0 == local_queues_ends[tid]) continue;
//// idi r = merge_two_queues_into_1st_queue_seq_fixed(
//// set_L,
//// 0,
//// L,
////// local_queues_list[tid],
////// 0,
//// local_queues_array,
//// tid * local_queue_length,
//// local_queues_ends[tid]);
////// L + 1);
//// local_queues_ends[tid] = 0; // Reset the local queue
//// if (r < nk) {
//// nk = r;
//// }
//// }
//// }
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
//#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
//
// {// Reset
//// is_visited.reset();
//// std::fill(is_visited.begin(), is_visited.end(), 0);
// is_visited.clear_all();
// std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
// }
//}
/*
 * 5/7/2020-15:14
 * Use one thread to scale M up until it reaches value_M_middle,
 * then switch to multiple threads.
 */
// Top-M best-first search on the optimized NSG graph for a single query.
// Two phases:
//   Phase 1 (single thread): expand candidates with M doubling from 1 each
//     round, while M < value_M_middle, maintaining only the "global" queue.
//   Phase 2 (OpenMP): continue expansion with per-thread local queues that
//     are merged into the global queue after every round.
// On return, set_K[0..K) holds the ids of the best K candidates found.
//
// set_L layout: slots [0, base_set_L) hold the local queues of threads
// 1..num_threads_-1 (thread t uses offset (t-1)*local_queue_length); the
// global queue lives at offset base_set_L with its size tracked in
// local_queues_ends[num_threads_ - 1]. Thread 0 writes directly into the
// global queue (see the parallel loop below).
//
// Timing members (time_*_) and counters (count_*_) are cumulative across
// queries; the tmp_* locals exist so they can be accumulated via OpenMP
// reductions and flushed once per round.
inline void Searching::para_search_with_top_m_merge_queues_middle_m(
        const idi value_M_middle,
        const idi value_M_max,
        const idi query_id,
        const idi K,
        const idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K,
        const idi local_queue_length, // Maximum size of local queue
        const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
        std::vector<idi> &local_queues_ends, // Sizes of local queue
        std::vector<idi> &top_m_candidates,
        boost::dynamic_bitset<> &is_visited)
{
    time_initialization_ -= WallTimer::get_time_mark();
//    const idi base_set_L = (num_threads_ - 1) * local_queue_length;
    // Mark every initial candidate as visited (sequential; L is small).
//#pragma omp parallel for
    for (idi c_i = 0; c_i < L; ++c_i) {
        is_visited[init_ids[c_i]] = 1;
    }
    const dataf *query_data = queries_load_ + query_id * dimension_;
    // Warm the cache with the vertex records of the initial candidates.
#pragma omp parallel for
    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    uint64_t tmp_count_computation = 0;
    // Get the distances of all candidates, store in the set set_L.
    // Vertex record layout (per the pointer walk below): a dataf norm
    // followed by the vector data.
//#pragma omp parallel for
#pragma omp parallel for reduction(+ : tmp_count_computation)
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++;
        ++tmp_count_computation;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked.
    }
    count_distance_computation_ += tmp_count_computation;
    tmp_count_computation = 0;
//    std::sort(set_L.begin(), set_L.begin() + L);
    // Sort only the global queue region (ascending by distance).
    std::sort(
            set_L.begin() + base_set_L,
            set_L.begin() + base_set_L + L);
    local_queues_ends[num_threads_ - 1] = L;
    time_initialization_ += WallTimer::get_time_mark();
    idi top_m_candidates_end = 0;
    idi k = 0; // Index of first unchecked candidate.
    idi tmp_count = 0; // for debug
    idi M = 1;
    time_sequential_phase_ -= WallTimer::get_time_mark();
    { // Single thread
        // Phase 1: stays sequential while M is small; M doubles each round
        // and this loop exits once M reaches value_M_middle (strict <).
        while (k < L && M < value_M_middle) {
            ++tmp_count;
//            {//test
//                printf("tmp_count: %d\n", tmp_count);
//            }
            // Select M candidates
            idi last_k = L;
// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
            for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
                idi index_set_L = c_i + base_set_L;
                if (set_L[index_set_L].is_checked_) {
                    continue;
                }
                last_k = c_i; // Record the location of the last candidate selected.
                set_L[index_set_L].is_checked_ = true;
                top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
            }
            // nk tracks the smallest queue position any new candidate was
            // inserted at this round; it decides where k resumes.
            idi nk = L;
            // Push M candidates' neighbors into the queue.
            for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
                idi cand_id = top_m_candidates[c_i];
                _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
                // Adjacency layout: an idi out-degree followed by the
                // neighbor id array, stored after the vector data.
                idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
                idi out_degree = *out_edges++;
                for (idi n_i = 0; n_i < out_degree; ++n_i) {
                    _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
                }
                for (idi e_i = 0; e_i < out_degree; ++e_i) {
                    idi nb_id = out_edges[e_i];
                    { // Sequential edition
                        if (is_visited[nb_id]) {
                            continue;
                        }
                        is_visited[nb_id] = 1;
                    }
                    auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                    dataf norm = *nb_data++;
                    ++tmp_count_computation;
                    distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                    // Prune: worse than the current L-th best cannot enter.
                    if (dist > set_L[L - 1 + base_set_L].distance_) {
                        continue;
                    }
                    Candidate cand(nb_id, dist, false);
                    // Thread 0 maintains the "global" queue
                    idi r = add_into_queue(
                            set_L,
                            base_set_L,
                            local_queues_ends[num_threads_ - 1],
                            L,
                            cand);
                    if (r < nk) {
                        nk = r;
                    }
                }
            }
            top_m_candidates_end = 0; // Clear top_m_candidates
            count_distance_computation_ += tmp_count_computation;
            tmp_count_computation = 0;
            // Resume from the earliest insertion point, or just past the
            // last candidate picked this round.
            if (nk <= last_k) {
                k = nk;
            } else {
                k = last_k + 1;
            }
            {// Scale M
                if (M < value_M_max) {
                    M <<= 1;
                } else {
                    M = value_M_max;
                }
            }
        }
    }
    time_sequential_phase_ += WallTimer::get_time_mark();
    time_parallel_phase_ -= WallTimer::get_time_mark();
    uint64_t tmp_count_add_to_queue = 0;
    double tmp_time_pick_top_m = 0;
    double tmp_time_distance_computation = 0;
    double tmp_time_add_to_queue = 0.0;
    { // Multiple Threads
        // Phase 2: same expansion structure, but neighbor processing is
        // parallel. Non-master threads insert into their own local queues;
        // thread 0 inserts into the global queue directly.
        while (k < L) {
            time_expand_ -= WallTimer::get_time_mark();
            ++tmp_count;
//            {//test
//                printf("tmp_count: %d\n", tmp_count);
//            }
            // Select M candidates
            idi last_k = L;
            time_pick_top_m_ -= WallTimer::get_time_mark();
// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
            for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
                idi index_set_L = c_i + base_set_L;
                if (set_L[index_set_L].is_checked_) {
                    continue;
                }
                last_k = c_i; // Record the location of the last candidate selected.
                set_L[index_set_L].is_checked_ = true;
                top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
            }
            time_pick_top_m_ += WallTimer::get_time_mark();
            // nk is shared but only written by thread 0 (the else-branch
            // below), so it needs no reduction.
            idi nk = L;
            // Push M candidates' neighbors into the queue.
            // NOTE(review): is_visited is read and written here by multiple
            // threads without atomics (a non-atomic check-then-set). This
            // looks like a deliberate benign-race trade-off -- atomic
            // "editions" appear commented out in sibling variants of this
            // routine -- but confirm dynamic_bitset word-level tearing is
            // acceptable here.
#pragma omp parallel for reduction(+ : tmp_count_computation) \
                        reduction(+ : tmp_count_add_to_queue) \
                        reduction(+ : tmp_time_pick_top_m) \
                        reduction(+ : tmp_time_distance_computation) \
                        reduction(+ : tmp_time_add_to_queue)
//            for (int tid = 0; tid < num_threads_; ++tid) {
            for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
                tmp_time_pick_top_m -= WallTimer::get_time_mark();
                int tid = omp_get_thread_num();
                idi cand_id = top_m_candidates[c_i];
//                _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
                idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
                idi out_degree = *out_edges++;
//                for (idi n_i = 0; n_i < out_degree; ++n_i) {
//                    _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
//                }
                tmp_time_pick_top_m += WallTimer::get_time_mark();
                for (idi e_i = 0; e_i < out_degree; ++e_i) {
                    tmp_time_distance_computation -= WallTimer::get_time_mark();
                    idi nb_id = out_edges[e_i];
                    { // Sequential edition
                        if (is_visited[nb_id]) {
                            tmp_time_distance_computation += WallTimer::get_time_mark();
                            continue;
                        }
                        is_visited[nb_id] = 1;
                    }
                    auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                    dataf norm = *nb_data++;
                    ++tmp_count_computation;
                    distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                    tmp_time_distance_computation += WallTimer::get_time_mark();
                    if (dist > set_L[L - 1 + base_set_L].distance_) {
                        continue;
                    }
                    ++tmp_count_add_to_queue;
                    Candidate cand(nb_id, dist, false);
                    // Add to the local queue.
//                    tmp_time_pick_top_m -= WallTimer::get_time_mark();
                    tmp_time_add_to_queue -= WallTimer::get_time_mark();
                    if (0 != tid) {
                        // Non-Master threads using local queues
                        add_into_queue(
                                set_L,
                                (tid - 1) * local_queue_length,
                                local_queues_ends[tid - 1],
                                local_queue_length,
                                cand);
                    } else {
                        // Thread 0 maintains the "global" queue
                        idi r = add_into_queue(
                                set_L,
                                base_set_L,
                                local_queues_ends[num_threads_ - 1],
                                L,
                                cand);
                        if (r < nk) {
                            nk = r;
                        }
                    }
                    tmp_time_add_to_queue += WallTimer::get_time_mark();
//                    tmp_time_pick_top_m += WallTimer::get_time_mark();
                }
            }
            // Flush per-round reduction accumulators into the cumulative
            // instrumentation members.
            time_add_to_queue_ += tmp_time_add_to_queue;
            tmp_time_add_to_queue = 0;
//            }
            time_distance_computation_ += tmp_time_distance_computation;
            tmp_time_distance_computation = 0;
            time_pick_top_m_ += tmp_time_pick_top_m;
            tmp_time_pick_top_m = 0;
            top_m_candidates_end = 0; // Clear top_m_candidates
            count_add_to_queue_ += tmp_count_add_to_queue;
            tmp_count_add_to_queue = 0;
            count_distance_computation_ += tmp_count_computation;
            tmp_count_computation = 0;
            time_expand_ += WallTimer::get_time_mark();
//            // Merge. Merge all queues in parallel.
            {
                time_merge_ -= WallTimer::get_time_mark();
                // Merge all local queues into the global queue; r is the
                // lowest global position changed by the merge.
                if (num_threads_ > 1) {
                    idi r = merge_all_queues_para_array(
                            set_L,
                            local_queues_ends,
                            local_queue_length,
                            L);
                    if (r < nk) {
                        nk = r;
                    }
                }
                time_merge_ += WallTimer::get_time_mark();
            }
            if (nk <= last_k) {
                k = nk;
            } else {
                k = last_k + 1;
            }
            {// Scale M
                if (M < value_M_max) {
                    M <<= 1;
                } else {
                    M = value_M_max;
                }
            }
        }
    }
    time_parallel_phase_ += WallTimer::get_time_mark();
    time_ending_ -= WallTimer::get_time_mark();
    // Export the K best ids from the global queue.
#pragma omp parallel for
    for (idi k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i + base_set_L].id_;
//        set_K[k_i] = set_L[k_i].id_;
    }
    {// Reset
//        std::fill(is_visited.begin(), is_visited.end(), 0);
        is_visited.reset();
//        is_visited.clear_all();
        std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
    }
    time_ending_ += WallTimer::get_time_mark();
//    {//test
//        if (3 == query_id) {
//            exit(1);
//        }
//    }
}
// Budgeted variant of para_search_with_top_m_merge_queues_middle_m.
// Differences from that routine, as visible below:
//   * computation_threshold caps the total number of distance computations
//     for this query (count_single_query_computation); both the sequential
//     and the parallel while-loops stop once the budget is exceeded.
//   * init_size (instead of L) controls how many of init_ids seed the
//     global queue, whose current size is then tracked through the
//     reference alias global_queue_size.
// NOTE(review): despite the `_no_merge` suffix, the parallel phase still
// merges all local queues every round (merge_all_queues_para_array below);
// confirm what "no merge" was intended to mean.
// On return, set_K[0..K) holds the ids of the best K candidates found.
inline void Searching::para_search_with_top_m_merge_queues_middle_m_no_merge(
        const uint64_t computation_threshold,
        const idi value_M_middle,
        const idi value_M_max,
        const idi query_id,
        const idi K,
        const idi L,
        const idi init_size,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K,
        const idi local_queue_length, // Maximum size of local queue
        const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
        std::vector<idi> &local_queues_ends, // Sizes of local queue
        std::vector<idi> &top_m_candidates,
        boost::dynamic_bitset<> &is_visited)
{
    // Per-query computation accounting: the total plus a breakdown into
    // initialization / sequential-phase / parallel-phase counts (the
    // breakdown is only read by the commented-out debug print at the end).
    uint64_t count_single_query_computation = 0;
    uint64_t count_init_computation = 0;
    uint64_t count_seq_computation = 0;
    uint64_t count_par_computation = 0;
//    {//test
//        printf("query_id: %u\n", query_id);
//    }
//    time_initialization_ -= WallTimer::get_time_mark();
//    const idi base_set_L = (num_threads_ - 1) * local_queue_length;
    // Mark the first init_size initial candidates as visited.
    {
#pragma omp parallel for
        for (idi c_i = 0; c_i < init_size; ++c_i) {
//        for (idi c_i = 0; c_i < L; ++c_i) {
            is_visited[init_ids[c_i]] = 1;
//            is_visited.atomic_set_bit(init_ids[c_i]);
        }
    }
    const dataf *query_data = queries_load_ + query_id * dimension_;
    // Warm the cache with the vertex records of the initial candidates.
#pragma omp parallel for
    for (idi v_i = 0; v_i < init_size; ++v_i) {
//    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    uint64_t tmp_count_computation = 0;
    // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for
#pragma omp parallel for reduction(+ : tmp_count_computation)
    for (unsigned i = 0; i < init_size; i++) {
//    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        // Vertex record: a dataf norm followed by the vector data.
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++;
//        ++count_distance_computation_;
        ++tmp_count_computation;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked.
//        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    }
    count_distance_computation_ += tmp_count_computation;
    count_init_computation += tmp_count_computation;
    count_single_query_computation += tmp_count_computation;
    tmp_count_computation = 0;
//    std::sort(set_L.begin(), set_L.begin() + L);
    // Sort only the seeded portion of the global queue region.
    std::sort(
            set_L.begin() + base_set_L,
            set_L.begin() + base_set_L + init_size);
//            set_L.begin() + base_set_L + L);
    local_queues_ends[num_threads_ - 1] = init_size;
//    local_queues_ends[num_threads_ - 1] = L;
//    time_initialization_ += WallTimer::get_time_mark();
//    time_sequential_phase_ -= WallTimer::get_time_mark();
//    std::vector<idi> top_m_candidates(M);
    // Alias: the global queue's size IS the last slot of local_queues_ends.
    // Writing through this reference updates local_queues_ends in place.
    idi &global_queue_size = local_queues_ends[num_threads_ - 1];
    idi top_m_candidates_end = 0;
    idi k = 0; // Index of first unchecked candidate.
    idi tmp_count = 0; // for debug
    idi M = 1;
    { // Single thread
        // Phase 1: sequential while M < value_M_middle AND the computation
        // budget has not been exhausted.
        while (k < L && M < value_M_middle && count_single_query_computation <= computation_threshold) {
            ++tmp_count;
//            {//test
//                printf("tmp_count: %d\n", tmp_count);
//            }
//            int real_threads = std::min(static_cast<int>(M), num_threads_);
//            idi queue_base = num_threads_ - real_threads;
            // Select M candidates
            idi last_k = L;
// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
            for (idi c_i = k; c_i < global_queue_size && top_m_candidates_end < M; ++c_i) {
//            for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
                idi index_set_L = c_i + base_set_L;
                if (set_L[index_set_L].is_checked_) {
                    continue;
                }
                last_k = c_i; // Record the location of the last candidate selected.
                set_L[index_set_L].is_checked_ = true;
                top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
            }
            // nk tracks the smallest queue position any new candidate was
            // inserted at this round; it decides where k resumes.
            idi nk = L;
            // Push M candidates' neighbors into the queue.
            for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
                idi cand_id = top_m_candidates[c_i];
                _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
                // Adjacency layout: an idi out-degree followed by the
                // neighbor id array, stored after the vector data.
                idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
                idi out_degree = *out_edges++;
                for (idi n_i = 0; n_i < out_degree; ++n_i) {
                    _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
                }
                for (idi e_i = 0; e_i < out_degree; ++e_i) {
                    idi nb_id = out_edges[e_i];
                    { // Sequential edition
                        if (is_visited[nb_id]) {
                            continue;
                        }
                        is_visited[nb_id] = 1;
                    }
                    auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                    dataf norm = *nb_data++;
//                    ++count_distance_computation_;
                    ++tmp_count_computation;
                    distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                    // Prune against the current worst element actually in
                    // the queue (which may still be shorter than L).
                    if (dist > set_L[global_queue_size - 1 + base_set_L].distance_) {
//                    if (dist > set_L[L - 1 + base_set_L].distance_) {
                        continue;
                    }
                    Candidate cand(nb_id, dist, false);
                    // Thread 0 maintains the "global" queue
                    idi r = add_into_queue(
                            set_L,
                            base_set_L,
                            global_queue_size,
//                            local_queues_ends[num_threads_ - 1],
                            L,
                            cand);
                    if (r < nk) {
                        nk = r;
                    }
                }
            }
            top_m_candidates_end = 0; // Clear top_m_candidates
            count_distance_computation_ += tmp_count_computation;
            count_seq_computation += tmp_count_computation;
            count_single_query_computation += tmp_count_computation;
            tmp_count_computation = 0;
//            {// Local queues' ends
//                printf("query%u:iter: %u", query_id, tmp_count);
//                for (int i_t = 0; i_t < num_threads_; ++i_t) {
//                    printf(" [%u]: %u", i_t, local_queues_ends[i_t]);
//                }
//                printf("\n");
//            }
            // Resume from the earliest insertion point, or just past the
            // last candidate picked this round.
            if (nk <= last_k) {
                k = nk;
            } else {
                k = last_k + 1;
            }
            {// Scale M
                if (M < value_M_max) {
                    M <<= 1;
                } else {
                    M = value_M_max;
                }
            }
        }
    }
//    time_sequential_phase_ += WallTimer::get_time_mark();
//    time_parallel_phase_ -= WallTimer::get_time_mark();
    { // Multiple Threads
        // Phase 2: parallel expansion, also bounded by the budget.
        while (k < L and count_single_query_computation <= computation_threshold) {
//        while (k < L) {
            ++tmp_count;
//            {//test
//                printf("tmp_count: %d "
//                       "k: %u "
//                       "global_queue_size: %u\n",
//                       tmp_count,
//                       k,
//                       global_queue_size);
//            }
//            int real_threads = std::min(static_cast<int>(M), num_threads_);
//            idi queue_base = num_threads_ - real_threads;
            // Select M candidates
            idi last_k = L;
// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
            for (idi c_i = k; c_i < global_queue_size && top_m_candidates_end < M; ++c_i) {
//            for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
                idi index_set_L = c_i + base_set_L;
                if (set_L[index_set_L].is_checked_) {
                    continue;
                }
                last_k = c_i; // Record the location of the last candidate selected.
                set_L[index_set_L].is_checked_ = true;
                top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
            }
            // nk is shared but only written by thread 0 (the else-branch
            // below), so it needs no reduction.
            idi nk = L;
            // Push M candidates' neighbors into the queue.
            // NOTE(review): is_visited is read/written by multiple threads
            // here without atomics (non-atomic check-then-set); commented
            // atomic editions exist in sibling variants -- presumably a
            // deliberate benign-race trade-off, but confirm.
//#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(real_threads)
#pragma omp parallel for reduction(+ : tmp_count_computation)
            for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
                int tid = omp_get_thread_num();
                idi cand_id = top_m_candidates[c_i];
                _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
                idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
                idi out_degree = *out_edges++;
                for (idi n_i = 0; n_i < out_degree; ++n_i) {
                    _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
                }
                for (idi e_i = 0; e_i < out_degree; ++e_i) {
                    idi nb_id = out_edges[e_i];
                    { // Sequential edition
                        if (is_visited[nb_id]) {
                            continue;
                        }
                        is_visited[nb_id] = 1;
                    }
                    auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                    dataf norm = *nb_data++;
//                    ++count_distance_computation_;
                    ++tmp_count_computation;
                    distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                    if (dist > set_L[global_queue_size - 1 + base_set_L].distance_) {
//                    if (dist > set_L[L - 1 + base_set_L].distance_) {
                        continue;
                    }
                    Candidate cand(nb_id, dist, false);
                    // Add to the local queue.
                    if (0 != tid) {
                        // Non-Master threads using local queues
                        add_into_queue(
                                set_L,
                                (tid - 1) * local_queue_length,
                                local_queues_ends[tid - 1],
                                local_queue_length,
                                cand);
                    } else {
                        // Thread 0 maintains the "global" queue
                        idi r = add_into_queue(
                                set_L,
                                base_set_L,
                                global_queue_size,
//                                local_queues_ends[num_threads_ - 1],
                                L,
                                cand);
                        if (r < nk) {
                            nk = r;
                        }
                    }
                }
            }
            top_m_candidates_end = 0; // Clear top_m_candidates
            count_distance_computation_ += tmp_count_computation;
            count_par_computation += tmp_count_computation;
            count_single_query_computation += tmp_count_computation;
            tmp_count_computation = 0;
//            {// Local queues' ends
//                printf("query%u:iter: %u", query_id, tmp_count);
//                for (int i_t = 0; i_t < num_threads_; ++i_t) {
//                    printf(" [%u]: %u", i_t, local_queues_ends[i_t]);
//                }
//                printf("\n");
//            }
            // Merge. Merge all queues in parallel.
            {
                // Merge all local queues into the global queue; r is the
                // lowest global position changed by the merge. Note this
                // also updates global_queue_size via local_queues_ends.
                if (num_threads_ > 1) {
//                    idi r = merge_all_queues_queue_base(
//                            set_L,
//                            local_queues_ends,
//                            queue_base,
//                            real_threads,
//                            local_queue_length,
//                            L);
                    idi r = merge_all_queues_para_array(
                            set_L,
                            local_queues_ends,
                            local_queue_length,
                            L);
                    if (r < nk) {
                        nk = r;
                    }
                }
            }
            if (nk <= last_k) {
                k = nk;
            } else {
                k = last_k + 1;
            }
            {// Scale M
                if (M < value_M_max) {
                    M <<= 1;
                } else {
                    M = value_M_max;
                }
            }
//            {// Print relative distance
////                distf top_dist = set_L[base_set_L].distance_;
//                for (idi i_l = 0; i_l < L; ++i_l) {
//                    printf("%u %f\n",
//                           tmp_count, set_L[i_l + base_set_L].distance_);
////                           tmp_count, set_L[i_l + base_set_L].distance_ - top_dist);
//                }
//            }
        }
    }
//    time_parallel_phase_ += WallTimer::get_time_mark();
    // Export the K best ids from the global queue.
#pragma omp parallel for
    for (idi k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i + base_set_L].id_;
//        set_K[k_i] = set_L[k_i].id_;
    }
    {// Reset
//        std::fill(is_visited.begin(), is_visited.end(), 0);
        is_visited.reset();
//        is_visited.clear_all();
        std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
    }
//    {//test
//        if (3 == query_id) {
//            exit(1);
//        }
//    }
//    {//test
//        printf("count_single: %lu "
//               "ct_init: %lu "
//               "ct_seq: %lu "
//               "ct_par: %lu\n",
//               count_single_query_computation,
//               count_init_computation,
//               count_seq_computation,
//               count_par_computation);
//    }
}
///*
// * 6/15/2020-14:40
// * Queues merging together to the global queue
// */
//inline void Searching::para_search_with_top_m_merge_queues_sequential_merge(
// const idi value_M_middle,
// const idi value_M_max,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
// std::vector<idi> &top_m_candidates,
// boost::dynamic_bitset<> &is_visited)
//{
//// const idi base_set_L = (num_threads_ - 1) * local_queue_length;
// {
//#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
//// is_visited.atomic_set_bit(init_ids[c_i]);
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// uint64_t tmp_count_computation = 0;
// // Get the distances of all candidates, store in the set set_L.
////#pragma omp parallel for
//#pragma omp parallel for reduction(+ : tmp_count_computation)
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked.
//// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//// std::sort(set_L.begin(), set_L.begin() + L);
// std::sort(
// set_L.begin() + base_set_L,
// set_L.begin() + base_set_L + L);
// local_queues_ends[num_threads_ - 1] = L;
//
//// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// idi M = 1;
//
// { // Single thread
// while (k < L && M < value_M_middle) {
// ++tmp_count;
//// {//test
//// printf("tmp_count: %d\n", tmp_count);
//// }
//
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// idi index_set_L = c_i + base_set_L;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[index_set_L].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
// }
//
//
// idi nk = L;
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L - 1 + base_set_L].distance_) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// // Thread 0 maintains the "global" queue
// idi r = add_into_queue(
// set_L,
// base_set_L,
// local_queues_ends[num_threads_ - 1],
// L,
// cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
//
// {// Scale M
// if (M < value_M_max) {
// M <<= 1;
// } else {
// M = value_M_max;
// }
// }
// }
// }
//
// { // Multiple Threads
// while (k < L) {
// ++tmp_count;
//// {//test
//// if (num_threads_ == 2) {
//// printf("tmp_count: %d "
//// "k: %u\n",
//// tmp_count,
//// k);
//// }
//// }
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// idi index_set_L = c_i + base_set_L;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[index_set_L].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
// }
//
//
// idi nk = L;
// // Push M candidates' neighbors into the queue.
////#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(real_threads)
//#pragma omp parallel for reduction(+ : tmp_count_computation)
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// int tid = omp_get_thread_num();
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L - 1 + base_set_L].distance_) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// // Add to the local queue.
// if (0 != tid) {
// // Non-Master threads using local queues
// add_into_queue(
// set_L,
// (tid - 1) * local_queue_length,
// local_queues_ends[tid - 1],
// local_queue_length,
// cand);
// } else {
// // Thread 0 maintains the "global" queue
// idi r = add_into_queue(
// set_L,
// base_set_L,
// local_queues_ends[num_threads_ - 1],
// L,
// cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//
//// // Merge. Merge all queues in parallel.
// {
//// {//test
//// for (idi q_i = 0; q_i < num_threads_; ++q_i) {
//// if (0 == local_queues_ends[q_i]) {
//// continue;
//// }
//// for (idi e_i = 0; e_i < local_queues_ends[q_i]; ++e_i) {
//// printf("tmp_count: %u "
//// "q_i: %u "
//// "[%u]: (%u, %f)\n",
//// tmp_count,
//// q_i,
//// e_i, set_L[q_i * local_queue_length + e_i].id_, set_L[q_i * local_queue_length + e_i].distance_);
//// }
//// }
//// }
//// time_merge_ -= WallTimer::get_time_mark();
// if (num_threads_ > 1) {
// idi r = merge_all_queues_all_together_in_sequential(
// set_L,
// local_queues_ends,
// local_queue_length,
// L);
//// idi r = merge_all_queues_para_array(
//// set_L,
//// local_queues_ends,
//// local_queue_length,
//// L);
// if (r < nk) {
// nk = r;
// }
//// {//test
//// printf("tmp_count: %u "
//// "r: %u "
//// "last_k: %u\n",
//// tmp_count,
//// r,
//// last_k);
//// for (idi l_i = 0; l_i < L; ++l_i) {
//// printf("tmp_count: %u "
//// "[%u]: (%u, %f)\n",
//// tmp_count,
//// l_i, set_L[l_i + base_set_L].id_, set_L[l_i + base_set_L].distance_);
//// }
//// }
// }
//
//// time_merge_ += WallTimer::get_time_mark();
// }
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// {// Scale M
// if (M < value_M_max) {
// M <<= 1;
// } else {
// M = value_M_max;
// }
// }
// }
// }
//
//#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i + base_set_L].id_;
//// set_K[k_i] = set_L[k_i].id_;
// }
//
// {// Reset
//// std::fill(is_visited.begin(), is_visited.end(), 0);
// is_visited.reset();
//// is_visited.clear_all();
// std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
// }
//
//// {//test
//// if (0 == query_id) {
//// exit(1);
//// }
//// }
//}
///*
// * 6/19/2020:
// * Intra-query + Inter-query
// */
//inline void Searching::para_search_with_top_m_nested_para(
// const idi batch_start,
// const idi batch_size,
// const idi value_M_middle,
// const idi value_M_max,
// const idi K,
// const idi L,
// std::vector< std::vector<Candidate> > &set_L_list,
// const std::vector<idi> &init_ids,
// std::vector< std::vector<idi> > &set_K_list,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_intra_query_ - 1) * local_queue_length;
// std::vector< std::vector<idi> > &local_queues_ends_list, // Sizes of local queue
// std::vector< std::vector<idi> > &top_m_candidates_list,
// std::vector< boost::dynamic_bitset<> > &is_visited_list)
//{
// {// Initialize is_visited flag array
//#pragma omp parallel for num_threads(num_threads_inter_query_)
// for (idi q_i = 0; q_i < batch_size; ++q_i) {
// auto &is_visited = is_visited_list[q_i];
//#pragma omp parallel for num_threads(num_threads_intra_query_)
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
// }
// }
// }
//
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
//
// uint64_t tmp_count_total_computation = 0;
//#pragma omp parallel for num_threads(num_threads_inter_query_) reduction(+ : tmp_count_total_computation)
// for (idi q_i = 0; q_i < batch_size; ++q_i) {
// idi query_id = batch_start + q_i;
// auto &set_L = set_L_list[q_i];
// auto &local_queues_ends = local_queues_ends_list[q_i];
// auto &is_visited = is_visited_list[q_i];
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
////#pragma omp parallel for
//// for (idi v_i = 0; v_i < L; ++v_i) {
//// idi v_id = init_ids[v_i];
//// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
//// }
// uint64_t tmp_count_computation = 0;
// // Get the distances of all candidates, store in the set set_L.
////#pragma omp parallel for
//#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(num_threads_intra_query_)
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked.
//// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
//// count_distance_computation_ += tmp_count_computation;
// tmp_count_total_computation += tmp_count_computation;
// tmp_count_computation = 0;
//// std::sort(set_L.begin(), set_L.begin() + L);
// std::sort(
// set_L.begin() + base_set_L,
// set_L.begin() + base_set_L + L);
// local_queues_ends[num_threads_intra_query_ - 1] = L;
//
//// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// idi M = 1;
//
// auto &top_m_candidates = top_m_candidates_list[q_i];
// { // Single thread
// while (k < L && M < value_M_middle) {
// ++tmp_count;
//// {//test
//// printf("tmp_count: %d\n", tmp_count);
//// }
//
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// idi index_set_L = c_i + base_set_L;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[index_set_L].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
// }
//
// idi nk = L;
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
//// {//test
//// if (391655 == nb_id) {
//// printf("tmp_count: %u "
//// "nb_id: %u "
//// "distf: %f\n",
//// tmp_count,
//// nb_id,
//// dist);
//// }
//// }
// if (dist > set_L[L - 1 + base_set_L].distance_) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// // Thread 0 maintains the "global" queue
// idi r = add_into_queue(
// set_L,
// base_set_L,
// local_queues_ends[num_threads_intra_query_ - 1],
// L,
// cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//// count_distance_computation_ += tmp_count_computation;
// tmp_count_total_computation += tmp_count_computation;
// tmp_count_computation = 0;
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
//
// {// Scale M
// if (M < value_M_max) {
// M <<= 1;
// } else {
// M = value_M_max;
// }
// }
// }
// }
//
// { // Multiple Threads
// while (k < L) {
// ++tmp_count;
//// {//test
//// printf("tmp_count: %d\n", tmp_count);
//// }
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// idi index_set_L = c_i + base_set_L;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[index_set_L].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
// }
//
// idi nk = L;
// // Push M candidates' neighbors into the queue.
//#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(num_threads_intra_query_)
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// int tid = omp_get_thread_num();
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
//// {//test
//// if (391655 == nb_id) {
//// printf("tmp_count: %u "
//// "nb_id: %u "
//// "distf: %f\n",
//// tmp_count,
//// nb_id,
//// dist);
//// }
//// }
// if (dist > set_L[L - 1 + base_set_L].distance_) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// // Add to the local queue.
// if (0 != tid) {
// // Non-Master threads using local queues
// add_into_queue(
// set_L,
// (tid - 1) * local_queue_length,
// local_queues_ends[tid - 1],
// local_queue_length,
// cand);
// } else {
// // Thread 0 maintains the "global" queue
// idi r = add_into_queue(
// set_L,
// base_set_L,
// local_queues_ends[num_threads_intra_query_ - 1],
// L,
// cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//// count_distance_computation_ += tmp_count_computation;
// tmp_count_total_computation += tmp_count_computation;
// tmp_count_computation = 0;
//
//// // Merge. Merge all queues in parallel.
// {
//// time_merge_ -= WallTimer::get_time_mark();
// if (num_threads_intra_query_ > 1) {
// idi r = merge_all_queues_para_array(
// set_L,
// local_queues_ends,
// local_queue_length,
// L);
// if (r < nk) {
// nk = r;
// }
// }
//// time_merge_ += WallTimer::get_time_mark();
// }
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// {// Scale M
// if (M < value_M_max) {
// M <<= 1;
// } else {
// M = value_M_max;
// }
// }
// }
// }
// count_distance_computation_ += tmp_count_total_computation;
// tmp_count_total_computation = 0;
//
// auto &set_K = set_K_list[query_id];
//
//#pragma omp parallel for num_threads(num_threads_intra_query_)
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i + base_set_L].id_;
//// set_K[k_i] = set_L[k_i].id_;
// }
//
// {// Reset
//// std::fill(is_visited.begin(), is_visited.end(), 0);
// is_visited.reset();
//// is_visited.clear_all();
// std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
// }
// }
//
//// {//test
//// if (3 == query_id) {
//// exit(1);
//// }
//// }
//// {
//// for (idi k_i = 0; k_i < K; ++k_i) {
//// printf("%u: (%u %f)\n",
//// k_i, set_L_list[0][k_i].id_, set_L_list[0][k_i].distance_);
//// }
//// if (0 == batch_start) {
//// exit(1);
//// }
//// }
//}
/*
* 6/22/2020-21:30
* Do searching on the local_set_L
* local_set_L is already sorted
* is_visited is already set up.
*/
inline void Searching::subsearch_with_top_m(
const idi value_M_max,
const idi query_id,
const idi local_L,
std::vector<Candidate> &set_L,
const idi set_L_start,
idi &set_L_size,
std::vector<idi> &local_top_m_candidates,
boost::dynamic_bitset<> &is_visited,
uint64_t &local_count_distance_computation)
{
const dataf *query_data = queries_load_ + query_id * dimension_;
// idi local_top_m_candidates_end = 0;
idi k = 0; // Index of first unchecked candidate.
idi iter = 0;
idi M = 1; // value of M
while (k < local_L) {
++iter;
subsearch_top_m_for_one_iteration(
iter,
k,
M,
query_id,
query_data,
local_L,
set_L,
set_L_start,
set_L_size,
local_top_m_candidates,
is_visited,
local_count_distance_computation);
{// Scale M
if (M < value_M_max) {
M <<= 1;
} else {
M = value_M_max;
}
}
}
// {//test
// printf("set_L_start: %u "
// "local_count_distance_computation: %lu\n",
// set_L_start,
// local_count_distance_computation);
// }
}
//// Backup
//inline void Searching::subsearch_with_top_m(
// const idi value_M_max,
// const idi query_id,
// const idi local_L,
// std::vector<Candidate> &set_L,
// const idi base_set_L,
// idi &set_L_end,
// std::vector<idi> &local_top_m_candidates,
// boost::dynamic_bitset<> &is_visited,
// uint64_t &local_count_distance_computation)
//{
// const dataf *query_data = queries_load_ + query_id * dimension_;
// idi local_top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi iter = 0;
// idi M = 1; // value of M
//
// while (k < local_L) {
// ++iter;
// // Select M candidates
// idi last_k = local_L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < set_L_end && local_top_m_candidates_end < M; ++c_i) {
// idi index_set_L = c_i + base_set_L;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[index_set_L].is_checked_ = true;
// local_top_m_candidates[local_top_m_candidates_end++] = set_L[index_set_L].id_;
// }
//
// idi nk = local_L;
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < local_top_m_candidates_end; ++c_i) {
// idi cand_id = local_top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// ++local_count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[set_L_end - 1 + base_set_L].distance_) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// // Thread 0 maintains the "global" queue
// idi r = add_into_queue(
// set_L,
// base_set_L,
// set_L_end,
// local_L,
// cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// local_top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
//
// {// Scale M
// if (M < value_M_max) {
// M <<= 1;
// } else {
// M = value_M_max;
// }
// }
// }
//}
/*
* 7/6/2020-23:17
* Subsearch only 1 iteration using top-m
*/
// Perform ONE top-M expansion iteration on the sub-queue
// set_L[set_L_start .. set_L_start + set_L_size):
//   1. scan from k_uc to collect up to value_M unchecked candidate ids;
//   2. expand each candidate's out-neighbors from the packed graph buffer
//      opt_nsg_graph_, computing distances and inserting improvements back
//      into the (sorted) sub-queue via add_into_queue();
//   3. advance k_uc to the earliest position that still needs checking.
// `iter` and `query_id` are only used by the (commented-out) debug dumps.
inline void Searching::subsearch_top_m_for_one_iteration(
        const idi iter,
        idi &k_uc,
        const idi value_M,
        const idi query_id,
        const dataf *query_data,
        const idi L,
        std::vector<Candidate> &set_L,
        const idi set_L_start,
        idi &set_L_size,
        std::vector<idi> &top_m_candidates,
        boost::dynamic_bitset<> &is_visited,
        uint64_t &count_distance_computation)
{
    // Select M candidates: the first value_M unchecked entries at or after k_uc.
    idi top_m_candidates_end = 0;
    idi last_k = L; // Sentinel: stays L if no candidate was selected.
// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
    for (idi c_i = k_uc; c_i < set_L_size && top_m_candidates_end < value_M; ++c_i) {
        idi index_set_L = c_i + set_L_start;
        if (set_L[index_set_L].is_checked_) {
            continue;
        }
        last_k = c_i; // Record the location of the last candidate selected.
        set_L[index_set_L].is_checked_ = true;
        top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
//        {//test
//            M_ids_.push_back(set_L[index_set_L].id_);
//        }
    }

    idi nk = L; // Lowest queue position any newly-inserted candidate landed at.
    // Push M candidates' neighbors into the queue.
    for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
        idi cand_id = top_m_candidates[c_i];
        _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
        // Adjacency list is stored right after the vertex data: [degree, id0, id1, ...].
        idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
        idi out_degree = *out_edges++;
        // Prefetch all neighbors' vertex records before the distance loop.
        for (idi n_i = 0; n_i < out_degree; ++n_i) {
            _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
        }
        for (idi e_i = 0; e_i < out_degree; ++e_i) {
            idi nb_id = out_edges[e_i];
            { // Sequential edition
                if (is_visited[nb_id]) {
                    continue;
                }
                is_visited[nb_id] = 1;
            }

            // Vertex record layout: leading precomputed norm, then the data vector.
            auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
            dataf norm = *nb_data++;
            ++count_distance_computation;
            distf dist = compute_distance_with_norm(nb_data, query_data, norm);
            // Prune: worse than the current worst kept candidate cannot enter the queue.
            if (dist > set_L[set_L_size - 1 + set_L_start].distance_) {
                continue;
            }

            Candidate cand(nb_id, dist, false);
            idi r = add_into_queue(
                    set_L,
                    set_L_start,
                    set_L_size,
                    L,
                    cand);
            if (r < nk) {
                nk = r;
            }
        }
    }
//    top_m_candidates_end = 0; // Clear top_m_candidates

    // Resume next iteration at the earliest unchecked position: either where a
    // new candidate was inserted (nk) or just past the last selected one.
    if (nk <= last_k) {
        k_uc = nk;
    } else {
        k_uc = last_k + 1;
    }

//    {//test
//        for (idi l_i = 0; l_i < set_L_size; ++l_i) {
//            L_ids_.push_back(set_L[set_L_start + l_i].id_);
//        }
//        std::sort(L_ids_.begin(), L_ids_.end());
//        std::sort(M_ids_.begin(), M_ids_.end());
//        for (idi m_i = 0; m_i < M_ids_.size(); ++m_i) {
//            printf("query_id: %u "
//                   "iter: %u "
//                   "M[%u]: "
//                   "%u\n",
//                   query_id,
//                   iter,
//                   m_i,
//                   M_ids_[m_i]);
//        }
//        M_ids_.clear();
//        for (idi l_i = 0; l_i < L_ids_.size(); ++l_i) {
//            printf("query_id: %u "
//                   "iter: %u "
//                   "L[%u]: "
//                   "%u\n",
//                   query_id,
//                   iter,
//                   l_i,
//                   L_ids_[l_i]);
//        }
//        L_ids_.clear();
//    }
}
/*
* One more parameter for distance bound
*/
// Variant of subsearch_top_m_for_one_iteration() that prunes neighbors
// against an explicit distance bound (bound_lth) instead of the current
// sub-queue tail. Used when a global L-th distance is shared across
// sub-queues. All other logic matches the base version.
inline void Searching::subsearch_top_m_for_one_iteration_lth(
        const distf bound_lth,
        const idi iter,
        idi &k_uc,
        const idi value_M,
        const idi query_id,
        const dataf *query_data,
        const idi L,
        std::vector<Candidate> &set_L,
        const idi set_L_start,
        idi &set_L_size,
        std::vector<idi> &top_m_candidates,
        boost::dynamic_bitset<> &is_visited,
        uint64_t &count_distance_computation)
{
    // Select M candidates: first value_M unchecked entries at or after k_uc.
    idi top_m_candidates_end = 0;
    idi last_k = L; // Sentinel: stays L if nothing was selected.
// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
    for (idi c_i = k_uc; c_i < set_L_size && top_m_candidates_end < value_M; ++c_i) {
        idi index_set_L = c_i + set_L_start;
        if (set_L[index_set_L].is_checked_) {
            continue;
        }
        last_k = c_i; // Record the location of the last candidate selected.
        set_L[index_set_L].is_checked_ = true;
        top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
    }
    idi nk = L; // Lowest insertion position among new candidates.
    // Push M candidates' neighbors into the queue.
    for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
        idi cand_id = top_m_candidates[c_i];
        _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
        // Packed adjacency: [degree, id0, id1, ...] after the vertex data.
        idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
        idi out_degree = *out_edges++;
        for (idi n_i = 0; n_i < out_degree; ++n_i) {
            _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
        }
        for (idi e_i = 0; e_i < out_degree; ++e_i) {
            idi nb_id = out_edges[e_i];
            { // Sequential edition
                if (is_visited[nb_id]) {
                    continue;
                }
                is_visited[nb_id] = 1;
            }
            auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
            dataf norm = *nb_data++;
            ++count_distance_computation;
            distf dist = compute_distance_with_norm(nb_data, query_data, norm);
            // Prune against the caller-supplied bound, not the local queue tail.
            if (dist > bound_lth) {
                continue;
            }
            Candidate cand(nb_id, dist, false);
            idi r = add_into_queue(
                    set_L,
                    set_L_start,
                    set_L_size,
                    L,
                    cand);
            if (r < nk) {
                nk = r;
            }
        }
    }
    // Resume at the earliest position that still needs checking.
    if (nk <= last_k) {
        k_uc = nk;
    } else {
        k_uc = last_k + 1;
    }
}
/*
* 7/24/2020-10:53
* Subsearch for one iteration, with the global L-th value as the bound,
* and the top_m_position indicates the bound for local top-M vertices.
*/
// Instrumented variant of one top-M expansion iteration: additionally
// accumulates wall-clock time spent picking candidates, computing distances,
// and inserting into the queue, plus a count of successful queue insertions.
// NOTE(review): `bound_lth` is currently UNUSED — the active pruning check
// compares against the sub-queue tail (the bound_lth comparison is commented
// out below). Confirm which bound is intended before relying on it.
inline void Searching::subsearch_top_m_for_one_iteration_lth_mth(
        const distf bound_lth,
//        const idi top_m_position,
        const idi iter,
        idi &k_uc,
        const idi local_m_count,
        const idi query_id,
        const dataf *query_data,
        const idi L,
        std::vector<Candidate> &set_L,
        const idi set_L_start,
        idi &set_L_size,
        std::vector<idi> &top_m_candidates,
        boost::dynamic_bitset<> &is_visited,
        uint64_t &count_distance_computation,
        double &time_pick_top_m,
        uint64_t &count_add_to_queue,
        double &time_distance_computation,
        double &time_add_to_queue)
{
//    {//test
//        printf("query_id: %u "
//               "iter: %u "
//               "tid: %u \n",
//               query_id,
//               iter,
//               omp_get_thread_num());
//    }
    // Select up to local_m_count unchecked candidates starting at k_uc.
    idi top_m_candidates_end = 0;
    idi last_k = L; // Sentinel: stays L if nothing was selected.
//    Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
//    for (idi c_i = k_uc; c_i < top_m_position; ++c_i) {
    time_pick_top_m -= WallTimer::get_time_mark();
    for (idi c_i = k_uc; c_i < set_L_size && top_m_candidates_end < local_m_count; ++c_i) {
        idi index_set_L = c_i + set_L_start;
        if (set_L[index_set_L].is_checked_) {
            continue;
        }
        last_k = c_i; // Record the location of the last candidate selected.
        set_L[index_set_L].is_checked_ = true;
        top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
//        {//test
//            M_ids_.push_back(set_L[index_set_L].id_);
//        }
    }
    time_pick_top_m += WallTimer::get_time_mark();
    idi nk = L; // Lowest insertion position among new candidates.
    // Push M candidates' neighbors into the queue.
    for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
        // Edge-list fetch is billed to time_pick_top_m (prefetches disabled here).
        time_pick_top_m -= WallTimer::get_time_mark();
        idi cand_id = top_m_candidates[c_i];
//        _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
        idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
        idi out_degree = *out_edges++;
//        for (idi n_i = 0; n_i < out_degree; ++n_i) {
//            _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
//        }
        time_pick_top_m += WallTimer::get_time_mark();
        for (idi e_i = 0; e_i < out_degree; ++e_i) {
            time_distance_computation -= WallTimer::get_time_mark();
            idi nb_id = out_edges[e_i];
            { // Sequential edition
                if (is_visited[nb_id]) {
                    // Close the timing window before skipping this neighbor.
                    time_distance_computation += WallTimer::get_time_mark();
                    continue;
                }
                is_visited[nb_id] = 1;
            }
            auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
            dataf norm = *nb_data++;
            ++count_distance_computation;
            distf dist = compute_distance_with_norm(nb_data, query_data, norm);
            time_distance_computation += WallTimer::get_time_mark();
            // Active prune: against this sub-queue's current tail distance.
            if (dist > set_L[set_L_start + set_L_size - 1].distance_) {
//            if (dist > bound_lth) {
                continue;
            }
            ++count_add_to_queue;
            Candidate cand(nb_id, dist, false);
//            time_pick_top_m -= WallTimer::get_time_mark();
            time_add_to_queue -= WallTimer::get_time_mark();
            idi r = add_into_queue(
                    set_L,
                    set_L_start,
                    set_L_size,
                    L,
                    cand);
            if (r < nk) {
                nk = r;
            }
            time_add_to_queue += WallTimer::get_time_mark();
//            time_pick_top_m += WallTimer::get_time_mark();
        }
    }
    // Resume at the earliest position that still needs checking.
    if (nk <= last_k) {
        k_uc = nk;
    } else {
        k_uc = last_k + 1;
    }
}
/*
* 6/23/2020-13:37
* Is is good to use subsearch by every thread it self?
*/
// Parallel top-M search, variant v0: the candidate pool set_L is split into
// num_threads_ contiguous sub-queues; each thread runs an independent
// subsearch_with_top_m() on its own sub-queue, then the sub-queues are merged
// by merge_in_set_L(). Results (deduplicated ids) are written into set_K.
//
// @param value_M_max  per-iteration batch-size cap (divided among queues).
// @param query_id     index of the query vector inside queries_load_.
// @param K            number of result ids to write into set_K.
// @param L            total candidate pool size (length of the search queue).
// @param set_L        candidate pool, length >= L.
// @param init_ids     L starting vertex ids.
// @param set_K        output: up to K nearest-neighbor ids.
// @param top_m_candidates_list  per-thread scratch buffers for top-M ids.
// @param is_visited   visited-vertex bitset; reset before returning.
inline void Searching::para_search_with_top_m_subsearch_v0(
//        const idi value_M_middle,
        const idi value_M_max,
        const idi query_id,
        const idi K,
        const idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K,
//        const idi local_queue_length, // Maximum size of local queue
//        const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
//        std::vector<idi> &local_queues_ends, // Sizes of local queue
        std::vector< std::vector<idi> > &top_m_candidates_list,
        boost::dynamic_bitset<> &is_visited)
{
    uint64_t tmp_count_computation = 0;
    {// Initialization
        // is_visited flag array
//#pragma omp parallel for
// Cannot use OMP for bit array is_visited!
        for (idi c_i = 0; c_i < L; ++c_i) {
            is_visited[init_ids[c_i]] = 1;
        }
        const dataf *query_data = queries_load_ + query_id * dimension_;
#pragma omp parallel for
        for (idi v_i = 0; v_i < L; ++v_i) {
            idi v_id = init_ids[v_i];
            _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
        }
        // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for
#pragma omp parallel for reduction(+ : tmp_count_computation)
        for (unsigned i = 0; i < L; i++) {
            unsigned v_id = init_ids[i];
            // Vertex record: leading precomputed norm, then the data vector.
            auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
            dataf norm = *v_data++;
            ++tmp_count_computation;
            distf dist = compute_distance_with_norm(v_data, query_data, norm);
            set_L[i] = Candidate(v_id, dist, false); // False means not checked.
//            {//test
//                printf("q_i: %u "
//                       "init_ids[%u]: "
//                       "count: %u "
//                       "id: %u "
//                       "dist: %f \n",
//                       query_id,
//                       i,
//                       tmp_count_computation,
//                       v_id,
//                       dist);
//            }
        }
//        {//test
//            printf("Initialization tmp_count_computation: %lu\n",
//                   tmp_count_computation);
//        }
        count_distance_computation_ += tmp_count_computation;
        tmp_count_computation = 0;
//        std::sort(
//                set_L.begin(),
//                set_L.begin() + L);
    }
    idi queue_end = L;
    // Searching
    if (num_threads_ == 1) { // Single threads
        // One global sorted queue; full M budget.
        std::sort(
                set_L.begin(),
                set_L.end());
        subsearch_with_top_m(
                value_M_max,
                query_id,
                L,
                set_L,
                0,
                queue_end,
                top_m_candidates_list[0],
                is_visited,
                tmp_count_computation);
        count_distance_computation_ += tmp_count_computation;
        tmp_count_computation = 0;
//        {
//            idi half_length = queue_end / 2;
//            std::sort(
//                    set_L.begin(),
//                    set_L.begin() + half_length);
//
//            subsearch_with_top_m(
//                    value_M_max,
//                    query_id,
//                    half_length,
//                    set_L,
//                    0,
//                    half_length,
//                    top_m_candidates_list[0],
//                    is_visited,
//                    tmp_count_computation);
//
//            std::sort(
//                    set_L.begin() + half_length,
//                    set_L.end());
//            subsearch_with_top_m(
//                    value_M_max,
//                    query_id,
//                    half_length,
//                    set_L,
//                    half_length,
//                    half_length,
//                    top_m_candidates_list[0],
//                    is_visited,
//                    tmp_count_computation);
//            count_distance_computation_ += tmp_count_computation;
//
//            std::vector <Candidate> tmp_set_L(L);
//            std::merge(set_L.begin(), set_L.begin() + half_length,
//                       set_L.begin() + half_length, set_L.end(),
//                       tmp_set_L.begin());
//            std::copy(tmp_set_L.begin(), tmp_set_L.end(), set_L.begin());
//        }
//        {//test
//            printf("q_i: %u "
//                   "count_distance_computation_: %lu\n",
//                   query_id,
//                   count_distance_computation_);
//        }
    } else { // Multiple threads
        // Partition [0, L) into num_queues contiguous sub-queues of
        // local_queue_length each (last one may be shorter).
        const idi num_queues = num_threads_;
        const idi local_queue_length = (L - 1) / num_queues + 1;
        // Parallel for
        // NOTE(review): each thread writes bits of the shared is_visited inside
        // subsearch_with_top_m; boost::dynamic_bitset is not shown to be
        // thread-safe here — presumably benign for this algorithm, but confirm.
#pragma omp parallel for reduction(+ : tmp_count_computation)
        for (idi q_i = 0; q_i < num_queues; ++q_i) {
            idi local_queue_base = q_i * local_queue_length;
            if (local_queue_base >= L) {
                continue;
            }
            idi local_queue_end = local_queue_length;
            if (local_queue_base + local_queue_end > L) {
                local_queue_end = L - local_queue_base; // Trim the last sub-queue.
            }
            std::sort(
                    set_L.begin() + local_queue_base,
                    set_L.begin() + local_queue_base + local_queue_end);
            subsearch_with_top_m(
//                    1,
                    value_M_max / num_queues, // value_M_max
//                    local_queue_end, // value_M_max
                    query_id,
                    local_queue_end, // local_L
                    set_L,
                    local_queue_base, // base_set_L
                    local_queue_end, // set_L_end
                    top_m_candidates_list[q_i],
                    is_visited,
                    tmp_count_computation);
//            {//test
//                printf("q_i: %u "
//                       "tmp_count_computation: %lu\n",
//                       q_i,
//                       tmp_count_computation);
//                count_distance_computation_ += tmp_count_computation;
//                tmp_count_computation = 0;
//            }
        }
        count_distance_computation_ += tmp_count_computation;
        tmp_count_computation = 0;
//        {//test
//            printf("query_id: %u "
//                   "count_distance_computation_: %lu\n",
//                   query_id,
//                   count_distance_computation_);
//        }
        // Merge
//        time_merge_ -= WallTimer::get_time_mark();
        merge_in_set_L(
                set_L,
                L,
                num_queues,
                local_queue_length);
//        time_merge_ += WallTimer::get_time_mark();
//        {//test
//            printf("q_i: %u "
//                   "count_distance_computation_: %lu\n",
//                   query_id,
//                   count_distance_computation_);
//        }
    }
    {// Return the results to set_K
        // How to deal with duplicate?
        // Sub-queues may contain the same vertex; skip consecutive duplicates
        // while copying ids from the merged (sorted) set_L into set_K.
        // NOTE(review): if duplicates leave fewer than K unique ids within the
        // first L entries, the tail of set_K keeps stale values from a previous
        // query — confirm callers tolerate this.
        idi last_id = set_L[0].id_;
        set_K[0] = last_id;
        idi k_i = 1;
        idi l_i = 1;
        while (k_i < K && l_i < L) {
            if (last_id == set_L[l_i].id_) {
                ++l_i;
                continue;
            }
            last_id = set_L[l_i++].id_;
            set_K[k_i++] = last_id;
        }
//#pragma omp parallel for
//        for (idi k_i = 0; k_i < K; ++k_i) {
//            set_K[k_i] = set_L[k_i].id_;
////            set_K[k_i] = set_L[k_i].id_;
//        }
    }
//    {
//        for (idi k_i = 0; k_i < L; ++k_i) {
//            printf("q_i: %u "
//                   "k_i: %u "
//                   "id: %u "
//                   "dist: %f\n",
//                   query_id,
//                   k_i,
//                   set_L[k_i].id_,
//                   set_L[k_i].distance_);
//        }
//    }
    {// Reset
//        std::fill(is_visited.begin(), is_visited.end(), 0);
        is_visited.reset();
//        is_visited.clear_all();
    }
//    {//test
//        if (0 == query_id) {
//            exit(1);
//        }
//    }
}
/*
* 7/5/2020-18:38
* L-th Selection, and every thread does its own searching
*/
inline void Searching::para_search_with_top_m_subsearch_v1(
        const idi local_M_max,
        const idi query_id,
        const idi K,
        const idi global_L,
        const idi local_L,
        const idi total_L,
        const idi init_queue_end,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K,
        const std::vector<idi> &local_queues_bases,
        std::vector<idi> &local_queues_ends,
        std::vector< std::vector<idi> > &top_m_candidates_list,
        boost::dynamic_bitset<> &is_visited)
{
    uint64_t tmp_count_computation = 0;
    {// Initialization
        // is_visited flag array.
        // Cannot use OMP here: is_visited is a bit array, so writes to
        // different bits may hit the same underlying word (data race).
        for (idi c_i = 0; c_i < total_L; ++c_i) {
            is_visited[init_ids[c_i]] = 1;
        }

        const dataf *query_data = queries_load_ + query_id * dimension_;
#pragma omp parallel for
        for (idi v_i = 0; v_i < total_L; ++v_i) {
            idi v_id = init_ids[v_i];
            _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
        }
        // Compute the distances of all initial candidates; thread q_i fills
        // its own local queue segment of set_L with its slice of init_ids.
#pragma omp parallel for reduction(+ : tmp_count_computation)
        for (int q_i = 0; q_i < num_threads_; ++q_i) {
            idi local_queue_base = local_queues_bases[q_i];
            idi init_ids_base = q_i * init_queue_end;
            idi init_ids_bound = init_ids_base + init_queue_end;
            for (idi id_i = init_ids_base; id_i < init_ids_bound; ++id_i) {
                idi v_id = init_ids[id_i];
                auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
                dataf norm = *v_data++;
                ++tmp_count_computation;
                distf dist = compute_distance_with_norm(v_data, query_data, norm);
                set_L[local_queue_base++] = Candidate(v_id, dist, false); // False means not checked.
            }
            local_queues_ends[q_i] = init_queue_end;
        }
        count_distance_computation_ += tmp_count_computation;
        tmp_count_computation = 0;
    }

    // Searching
    if (num_threads_ == 1) { // Single thread
        std::sort(
                set_L.begin(),
                set_L.end());
        subsearch_with_top_m(
                local_M_max,
                query_id,
                local_L,
                set_L,
                0,
                local_queues_ends[0],
                top_m_candidates_list[0],
                is_visited,
                tmp_count_computation);
        count_distance_computation_ += tmp_count_computation;
        tmp_count_computation = 0;
    } else { // Multiple threads
        const dataf *query_data = queries_load_ + query_id * dimension_;
        const idi num_queues = num_threads_;
        // Sort every local queue so each worker expands its best candidates first.
#pragma omp parallel for
        for (idi q_i = 0; q_i < num_queues; ++q_i) {
            idi local_queue_base = local_queues_bases[q_i];
            std::sort(
                    set_L.begin() + local_queue_base,
                    set_L.begin() + local_queue_base + init_queue_end);
        }
        // bound_lth: tightest last-element distance over all local queues;
        // neighbors farther than this cannot make it into the merged top-L.
        distf bound_lth = FLT_MAX;
        for (idi q_i = 0; q_i < num_queues; ++q_i) {
            bound_lth = std::min(bound_lth, set_L[local_queues_bases[q_i] + init_queue_end - 1].distance_);
        }
        std::vector<idi> ks(num_queues, 0); // Per-queue resume positions.
        idi iter = 0;
        idi local_M = 1;
        uint8_t not_finished = 1;
        while (not_finished) {
            not_finished = 0;
            ++iter;
            // reduction(| : not_finished): the previous unsynchronized shared
            // write from multiple threads was a data race; an OR-reduction
            // preserves the intended "any thread still working" semantics.
#pragma omp parallel for reduction(+ : tmp_count_computation) reduction(| : not_finished)
            for (idi q_i = 0; q_i < num_queues; ++q_i) {
                idi &k = ks[q_i];
                idi &local_queue_end = local_queues_ends[q_i];
                auto &local_top_m_candidates = top_m_candidates_list[q_i];
                if (k >= local_queue_end) {
                    continue;
                }
                not_finished = 1;
                const idi local_queue_base = local_queues_bases[q_i];
                subsearch_top_m_for_one_iteration_lth(
                        bound_lth,
                        iter,
                        k,
                        local_M,
                        query_id,
                        query_data,
                        local_L,
                        set_L,
                        local_queue_base,
                        local_queue_end,
                        local_top_m_candidates,
                        is_visited,
                        tmp_count_computation);
            }
            count_distance_computation_ += tmp_count_computation;
            tmp_count_computation = 0;
            {// Selecting and update local_queues_ends
                bound_lth = selecting_top_L_seq(
                        set_L,
                        global_L,
                        num_queues,
                        local_queues_bases,
                        local_queues_ends);
            }
            {// Scale M
                if (local_M < local_M_max) {
                    local_M <<= 1;
                }
            }
        }
    }

    {// Return the results to set_K: K-way merge of the sorted local queues,
     // skipping entries that duplicate the last emitted ID.
        std::vector<idi> pointer(num_threads_, 0);
        // Find the overall closest candidate among all queue heads.
        distf min_dist = FLT_MAX;
        idi min_q_i = 0;
        idi min_sub = 0; // Defensive init: stays 0 if every queue is empty.
        idi last_id;
        for (int q_i = 0; q_i < num_threads_; ++q_i) {
            if (pointer[q_i] >= local_queues_ends[q_i]) {
                continue;
            }
            idi sub = pointer[q_i] + local_queues_bases[q_i];
            distf tmp_dist = set_L[sub].distance_;
            if (tmp_dist < min_dist) {
                min_dist = tmp_dist;
                min_q_i = q_i;
                min_sub = sub;
            }
        }
        set_K[0] = set_L[min_sub].id_;
        ++pointer[min_q_i];
        last_id = set_K[0];

        bool is_finished = false;
        idi k_i = 1;
        while (k_i < K && !is_finished) {
            is_finished = true;
            min_dist = FLT_MAX;
            for (int q_i = 0; q_i < num_threads_; ++q_i) {
                const idi local_queue_end = local_queues_ends[q_i];
                if (pointer[q_i] >= local_queue_end) {
                    continue;
                }
                idi sub = pointer[q_i] + local_queues_bases[q_i];
                // Skip duplicates of the last emitted ID. The bound is checked
                // BEFORE dereferencing set_L[sub], so a slot past the queue's
                // valid end is never read (the old order read it first).
                while (pointer[q_i] < local_queue_end
                        && set_L[sub].id_ == last_id) {
                    ++pointer[q_i];
                    sub = pointer[q_i] + local_queues_bases[q_i];
                }
                if (pointer[q_i] >= local_queue_end) {
                    continue;
                }
                is_finished = false;
                distf tmp_dist = set_L[sub].distance_;
                if (tmp_dist < min_dist) {
                    min_dist = tmp_dist;
                    min_q_i = q_i;
                    min_sub = sub;
                }
            }
            if (is_finished) {
                // All queues exhausted this round: without this guard the
                // stale min_sub from the previous round would be emitted again.
                break;
            }
            set_K[k_i] = set_L[min_sub].id_;
            ++pointer[min_q_i];
            ++k_i;
        }
    }

    {// Reset
        is_visited.reset();
    }
}
//// Backup
//inline void Searching::para_search_with_top_m_subsearch_v1(
// const idi local_M_max,
// const idi query_id,
// const idi K,
// const idi total_L,
// const idi local_L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const std::vector<idi> &local_queues_bases,
// std::vector<idi> &local_queues_lengths,
// std::vector< std::vector<idi> > &top_m_candidates_list,
// boost::dynamic_bitset<> &is_visited)
//{
// uint64_t tmp_count_computation = 0;
// {// Initialization
// // is_visited flag array
////#pragma omp parallel for
//// Cannot use OMP for bit array is_visited!
// for (idi c_i = 0; c_i < total_L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//#pragma omp parallel for
// for (idi v_i = 0; v_i < total_L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
//
// // Get the distances of all candidates, store in the set set_L.
////#pragma omp parallel for
//#pragma omp parallel for reduction(+ : tmp_count_computation)
// for (unsigned i = 0; i < total_L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//// std::sort(
//// set_L.begin(),
//// set_L.begin() + L);
// }
//// idi queue_end = L;
//
// // Searching
// if (num_threads_ == 1) { // Single threads
//// local_queues_lengths[0] = local_L;
// std::sort(
// set_L.begin(),
// set_L.end());
// subsearch_with_top_m(
// local_M_max,
// query_id,
// local_L,
// set_L,
// 0,
// local_queues_lengths[0],
// top_m_candidates_list[0],
// is_visited,
// tmp_count_computation);
// count_distance_computation_ += tmp_count_computation;
// } else { // Multiple threads
//// std::fill(local_queues_lengths.begin(), local_queues_lengths.end(), local_L);
// const dataf *query_data = queries_load_ + query_id * dimension_;
// const idi num_queues = num_threads_;
//#pragma omp parallel for
// for (idi q_i = 0; q_i < num_queues; ++q_i) {
// idi local_queue_base = local_queues_bases[q_i];
// std::sort(
// set_L.begin() + local_queue_base,
// set_L.begin() + local_queue_base + local_L);
// }
//// const idi local_queue_length = (L - 1) / num_queues + 1;
// std::vector<idi> ks(num_queues, 0);
// idi iter = 0;
// idi local_M = 1;
// uint8_t not_finished = 1;
// while (not_finished) {
// not_finished = 0;
// ++iter;
//
////#pragma omp parallel for reduction(+ : tmp_count_computation)
// for (idi q_i = 0; q_i < num_queues; ++q_i) {
// idi &k = ks[q_i];
// idi &local_queue_end = local_queues_lengths[q_i];
// auto &local_top_m_candidates = top_m_candidates_list[q_i];
// if (k >= local_queue_end) {
// continue;
// }
// not_finished = 1;
//// ++not_finished;
// const idi local_queue_base = local_queues_bases[q_i];
//
// // Select top-M unchecked vertices.
// idi last_k = local_L;
// idi local_top_m_candidates_end = 0;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < local_queue_end && local_top_m_candidates_end < local_M; ++c_i) {
// idi index_set_L = c_i + local_queue_base;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[index_set_L].is_checked_ = true;
// local_top_m_candidates[local_top_m_candidates_end++] = set_L[index_set_L].id_;
// }
//
// idi nk = local_L;
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < local_top_m_candidates_end; ++c_i) {
// idi cand_id = local_top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[local_queue_end - 1 + local_queue_end].distance_) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// // Thread 0 maintains the "global" queue
// idi r = add_into_queue(
// set_L,
// local_queue_base,
// local_queue_end,
// local_L,
// cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// local_top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
// {// Setecting and update local_queues_lengths
// time_merge_ -= WallTimer::get_time_mark();
// selecting_top_L_seq(
// set_L,
// total_L,
//// local_L,
// num_queues,
// local_queues_bases,
// local_queues_lengths);
//// merge_in_set_L(
//// set_L,
//// L,
//// num_queues,
//// local_queue_length);
// time_merge_ += WallTimer::get_time_mark();
// }
// {// Scale M
// if (local_M < local_M_max) {
// local_M <<= 1;
// }
//// else {
//// local_M = value_M_max;
//// }
// }
// }
// }
//
// {// Return the results to set_K
// std::vector<idi> pointer(num_threads_, 0);
// // get the first
// distf min_dist = FLT_MAX;
// idi min_q_i;
// idi min_sub;
// idi last_id;
// for (int q_i = 0; q_i < num_threads_; ++q_i) {
// {//test
// if (local_queues_lengths[q_i] != local_L) {
// printf("What? local_queues_lengths[%u]: %u != local_L: %u\n",
// q_i, local_queues_lengths[q_i], local_L);
// }
// }
// if (pointer[q_i] >= local_queues_lengths[q_i]) {
// continue;
// }
// idi sub = pointer[q_i] + local_queues_bases[q_i];
// distf tmp_dist = set_L[sub].distance_;
// if (tmp_dist < min_dist) {
// min_dist = tmp_dist;
// min_q_i = q_i;
// min_sub = sub;
// }
// }
// set_K[0] = set_L[min_sub].id_;
// ++pointer[min_q_i];
// last_id = set_K[0];
//
// bool is_finished = false;
// idi k_i = 1;
// while (k_i < K && !is_finished) {
// is_finished = true;
// min_dist = FLT_MAX;
//// distf min_dist = FLT_MAX;
//// idi min_sub;
// for (int q_i = 0; q_i < num_threads_; ++q_i) {
// if (pointer[q_i] >= local_queues_lengths[q_i]) {
// continue;
// }
// is_finished = false;
// idi sub = pointer[q_i] + local_queues_bases[q_i];
// if (set_L[sub].id_ == last_id) {
// // Duplicate
// ++pointer[q_i];
// continue;
// }
// distf tmp_dist = set_L[sub].distance_;
// if (tmp_dist < min_dist) {
// min_dist = tmp_dist;
// min_q_i = q_i;
// min_sub = sub;
// }
// }
// set_K[k_i] = set_L[min_sub].id_;
// ++pointer[min_q_i];
// ++k_i;
// }
// }
//// {// Return the results to set_K
//// // How to deal with duplicate?
//// idi last_id = set_L[0].id_;
//// set_K[0] = last_id;
//// idi k_i = 1;
//// idi l_i = 1;
//// while (k_i < K && l_i < L) {
//// if (last_id == set_L[l_i].id_) {
//// ++l_i;
//// continue;
//// }
//// last_id = set_L[l_i++].id_;
//// set_K[k_i++] = last_id;
//// }
//// }
//
// {// Reset
//// std::fill(is_visited.begin(), is_visited.end(), 0);
// is_visited.reset();
//// is_visited.clear_all();
// std::fill(local_queues_lengths.begin(), local_queues_lengths.end(), local_L);
// }
//
//// {//test
//// if (1000 == query_id) {
//// exit(1);
//// }
//// }
//}
/*
* 7/24/2020-8:57
* L-th Selection.
* And also M-th Selection. Then the computation should be the same as merging.
*/
//inline void Searching::para_search_with_top_m_subsearch_v2(
// const idi local_M_max,
// const idi query_id,
// const idi K,
// const idi global_L,
// const idi local_L,
// const idi total_L,
// const idi init_queue_size,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const std::vector<idi> &local_queues_starts,
// std::vector<idi> &local_queues_sizes,
// std::vector<idi> &local_m_counts,
//// std::vector<idi> &local_top_m_positions,
// std::vector< std::vector<idi> > &top_m_candidates_list,
// boost::dynamic_bitset<> &is_visited)
//{
// uint64_t tmp_count_computation = 0;
// {// Initialization
// // is_visited flag array
////#pragma omp parallel for
//// Cannot use OMP for bit array is_visited!
// for (idi c_i = 0; c_i < total_L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//#pragma omp parallel for
// for (idi v_i = 0; v_i < total_L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
//
// // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for reduction(+ : tmp_count_computation)
// for (int q_i = 0; q_i < num_threads_; ++q_i) {
// idi local_queue_base = local_queues_starts[q_i];
// idi init_ids_base = q_i * init_queue_size;
// idi init_ids_bound = init_ids_base + init_queue_size;
// for (idi id_i = init_ids_base; id_i < init_ids_bound; ++id_i) {
// idi v_id = init_ids[id_i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[local_queue_base++] = Candidate(v_id, dist, false); // False means not checked.
//// {//test
//// if (11 == query_id
//// && 400620 == v_id) {
//// printf("query_id: %u "
//// "(%u %f)\n",
//// query_id,
//// v_id, dist);
//// }
//// }
// }
// local_queues_sizes[q_i] = init_queue_size;
// }
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
// }
//
// // Searching
// if (num_threads_ == 1) { // Single threads
//// local_queues_lengths[0] = local_L;
// std::sort(
// set_L.begin(),
// set_L.end());
// subsearch_with_top_m(
// local_M_max,
// query_id,
// local_L,
// set_L,
// 0,
// local_queues_sizes[0],
// top_m_candidates_list[0],
// is_visited,
// tmp_count_computation);
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
// } else { // Multiple threads
// const dataf *query_data = queries_load_ + query_id * dimension_;
// const idi num_queues = num_threads_;
//#pragma omp parallel for
// for (idi q_i = 0; q_i < num_queues; ++q_i) {
// idi local_queue_base = local_queues_starts[q_i];
// std::sort(
// set_L.begin() + local_queue_base,
// set_L.begin() + local_queue_base + init_queue_size);
// }
// idi local_M = 1;
// idi iter = 0;
// std::vector<idi> ks(num_queues, 0);
// distf bound_lth = FLT_MAX;
// for (idi q_i = 0; q_i < num_queues; ++q_i) {
// bound_lth = std::min(bound_lth, set_L[local_queues_starts[q_i] + init_queue_size - 1].distance_);
// }
// selecting_unchecked_top_M_seq(
// query_id,
// iter,
// set_L,
// ks,
// local_M,
// num_queues,
// local_queues_starts,
// local_queues_sizes,
// local_m_counts);
//// {//test
//// printf("query_id: %u "
//// "iter: %u",
//// query_id,
//// iter);
//// printf(" local_queues_sizes:");
//// for (idi i = 0; i < num_queues; ++i) {
//// printf(" %u", local_queues_sizes[i]);
//// }
//// printf(" local_m_counts:");
//// for (idi i = 0; i < num_queues; ++i) {
//// printf(" %u", local_m_counts[i]);
//// }
//// printf(" ks:");
//// for (idi i = 0; i < num_queues; ++i) {
//// printf(" %u", ks[i]);
//// }
//// printf("\n");
//// }
//
//
// double tmp_time_pick_top_m = 0;
// uint8_t not_finished = 1;
// while (true) {
// not_finished = 0;
// ++iter;
// // TODO: openmp
////#pragma omp parallel for reduction(+ : tmp_count_computation) reduction(+ : tmp_time_pick_top_m)
// for (idi q_i = 0; q_i < num_queues; ++q_i) {
// idi &k = ks[q_i];
// idi &local_queue_size = local_queues_sizes[q_i];
// auto &local_top_m_candidates = top_m_candidates_list[q_i];
//// if (k >= local_queue_size) {
//// continue;
//// }
//// not_finished = 1;
// idi local_m_count = local_m_counts[q_i];
//// // Cannot do this. local_m_count being 0 does not mean this worker is finished.
// if (local_M < num_queues && !local_m_count) {
// local_m_count = 1;
// }
// if (!local_m_count) {
//// k = local_L;
// continue;
// }
// not_finished = 1;
//// if (local_M < static_cast<idi>(num_threads_) &&
//// 0 == local_m_count) {
//// local_m_count = 1;
//// }
// const idi local_queue_start = local_queues_starts[q_i];
//
// subsearch_top_m_for_one_iteration_lth_mth(
// bound_lth,
//// local_top_m_pos,
// iter,
// k,
// local_m_count,
// query_id,
// query_data,
// local_L,
// set_L,
// local_queue_start,
// local_queue_size,
// local_top_m_candidates,
// is_visited,
// tmp_count_computation,
// tmp_time_pick_top_m);
// }
// time_pick_top_m_ += tmp_time_pick_top_m;
// tmp_time_pick_top_m = 0;
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
// if (!not_finished) {
// break;
// }
// {// Setecting and update local_queues_lengths
//// {//test
//// printf("query_id: %u "
//// "iter: %u "
//// "before:local_queues_sizes: (%u, %u)\n",
//// query_id,
//// iter,
//// local_queues_sizes[0], local_queues_sizes[1]);
//// }
//// time_select_L_ -= WallTimer::get_time_mark();
// bound_lth = selecting_top_L_seq(
// set_L,
// global_L,
//// local_L,
// num_queues,
// local_queues_starts,
// local_queues_sizes);
//// time_select_L_ += WallTimer::get_time_mark();
//// {//test
//// for (idi t_i = 0; t_i < num_queues; ++t_i) {
//// idi sub_start = local_queues_starts[t_i];
//// idi sub_bound = sub_start + local_queues_sizes[t_i];
//// for (idi e_i = sub_start; e_i < sub_bound; ++e_i) {
//// L_ids_.push_back(set_L[e_i].id_);
//// }
//// }
//// std::sort(L_ids_.begin(), L_ids_.end());
//// std::sort(M_ids_.begin(), M_ids_.end());
//// for (idi m_i = 0; m_i < M_ids_.size(); ++m_i) {
//// printf("query_id: %u "
//// "iter: %u "
//// "M[%u]: "
//// "%u\n",
//// query_id,
//// iter,
//// m_i,
//// M_ids_[m_i]);
//// }
//// M_ids_.clear();
//// for (idi l_i = 0; l_i < L_ids_.size(); ++l_i) {
//// printf("query_id: %u "
//// "iter: %u "
//// "L[%u]: "
//// "%u\n",
//// query_id,
//// iter,
//// l_i,
//// L_ids_[l_i]);
//// }
//// L_ids_.clear();
//// }
//// {//test
//// printf("query_id: %u "
//// "iter: %u "
//// "after:local_queues_sizes: (%u, %u)\n",
//// query_id,
//// iter,
//// local_queues_sizes[0], local_queues_sizes[1]);
//// }
// }
// {// Scale M
// if (local_M < local_M_max) {
// local_M <<= 1;
// }
//// else {
//// local_M = value_M_max;
//// }
// }
// {
//// {//test
//// printf("query_id: %u "
//// "iter: %u "
//// "selecting_M\n",
//// query_id,
//// iter);
//// }
//// {//test
//// if (0 == query_id
//// && 10 == iter) {
//// printf("test.\n");
//// }
//// }
//// time_select_M_ -= WallTimer::get_time_mark();
// selecting_unchecked_top_M_seq(
// query_id,
// iter,
// set_L,
// ks,
// local_M,
// num_queues,
// local_queues_starts,
// local_queues_sizes,
// local_m_counts);
//// time_select_M_ += WallTimer::get_time_mark();
//// {//test
//// printf("query_id: %u "
//// "iter: %u "
//// "local_m_counts: (%u, %u)\n",
//// query_id,
//// iter,
//// local_m_counts[0], local_m_counts[1]);
//// }
// }
//// {//test
//// printf("query_id: %u "
//// "iter: %u",
//// query_id,
//// iter);
//// printf(" local_queues_sizes:");
//// for (idi i = 0; i < num_queues; ++i) {
//// printf(" %u", local_queues_sizes[i]);
//// }
//// printf(" local_m_counts:");
//// for (idi i = 0; i < num_queues; ++i) {
//// printf(" %u", local_m_counts[i]);
//// }
//// printf(" ks:");
//// for (idi i = 0; i < num_queues; ++i) {
//// printf(" %u", ks[i]);
//// }
//// printf("\n");
//// }
// }
// }
//
//// time_merge_ -= WallTimer::get_time_mark();
// {// Return the results to set_K
// std::vector<idi> pointer(num_threads_, 0);
// // get the first
// distf min_dist = FLT_MAX;
// idi min_q_i;
// idi min_id;
// idi min_sub;
// idi last_id;
// for (int q_i = 0; q_i < num_threads_; ++q_i) {
// if (pointer[q_i] >= local_queues_sizes[q_i]) {
// continue;
// }
// idi sub = pointer[q_i] + local_queues_starts[q_i];
// distf tmp_dist = set_L[sub].distance_;
// idi tmp_id = set_L[sub].id_;
// if (tmp_dist < min_dist) {
// min_dist = tmp_dist;
// min_id = tmp_id;
// min_q_i = q_i;
// min_sub = sub;
// } else if (tmp_dist == min_dist && tmp_id < min_id) {
// min_id = tmp_id;
// min_q_i = q_i;
// min_sub = sub;
// }
// }
// set_K[0] = set_L[min_sub].id_;
//// {//test
//// printf("query_id: %u "
//// "[%u]: "
//// "(%u, %f)\n",
//// query_id,
//// 0,
//// set_L[min_sub].id_, set_L[min_sub].distance_);
//// }
// ++pointer[min_q_i];
// last_id = set_K[0];
//
// bool is_finished = false;
// idi k_i = 1;
// while (k_i < K && !is_finished) {
// is_finished = true;
// min_dist = FLT_MAX;
// for (int q_i = 0; q_i < num_threads_; ++q_i) {
// const idi local_queue_size = local_queues_sizes[q_i];
// idi sub = pointer[q_i] + local_queues_starts[q_i];
//
// while (pointer[q_i] < local_queue_size
// && set_L[sub].id_ == last_id) {
// ++pointer[q_i];
// ++sub;
// }
// if (pointer[q_i] >= local_queue_size) {
// continue;
// }
// is_finished = false;
// distf tmp_dist = set_L[sub].distance_;
// idi tmp_id = set_L[sub].id_;
// if (tmp_dist < min_dist) {
// min_dist = tmp_dist;
// min_id = tmp_id;
// min_q_i = q_i;
// min_sub = sub;
// } else if (tmp_dist == min_dist && tmp_id < min_id) {
// min_id = tmp_id;
// min_q_i = q_i;
// min_sub = sub;
// }
// }
// set_K[k_i] = set_L[min_sub].id_;
//// {//test
//// printf("query_id: %u "
//// "[%u]: "
//// "(%u, %f)\n",
//// query_id,
//// k_i,
//// set_L[min_sub].id_, set_L[min_sub].distance_);
//// }
// ++pointer[min_q_i];
// ++k_i;
// }
// }
//// time_merge_ += WallTimer::get_time_mark();
//// {// Return the results to set_K
//// // How to deal with duplicate?
//// idi last_id = set_L[0].id_;
//// set_K[0] = last_id;
//// idi k_i = 1;
//// idi l_i = 1;
//// while (k_i < K && l_i < L) {
//// if (last_id == set_L[l_i].id_) {
//// ++l_i;
//// continue;
//// }
//// last_id = set_L[l_i++].id_;
//// set_K[k_i++] = last_id;
//// }
//// }
//
// {// Reset
//// std::fill(is_visited.begin(), is_visited.end(), 0);
// is_visited.reset();
//// is_visited.clear_all();
//// std::fill(local_queues_sizes.begin(), local_queues_sizes.end(), init_queue_size);
// }
//
//// {//test
//// if (3 == query_id) {
//// exit(1);
//// }
//// }
//}
/*
 * 7/26/2020-15:41
 * L-th and M-th Selection.
 * Seq-Par Phases: when M is 1 or 2, search sequentially;
 * when M is 4 or larger, search in parallel.
 * This addresses the load-balance issue of small M.
 */
inline void Searching::para_search_with_top_m_subsearch_v3(
        const idi local_M_max,
        const idi local_M_middle,
        const idi query_id,
        const idi K,
        const idi global_L,
        const idi local_L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K,
        const std::vector<idi> &local_queues_starts,
        std::vector<idi> &local_queues_sizes,
        std::vector<idi> &local_m_counts,
        std::vector< std::vector<idi> > &top_m_candidates_list,
        boost::dynamic_bitset<> &is_visited)
{
    time_initialization_ -= WallTimer::get_time_mark();
    uint64_t tmp_count_computation = 0;
    {// Initialization
        // is_visited flag array.
        // Cannot use OMP here: is_visited is a bit array, so writes to
        // different bits may hit the same underlying word (data race).
        for (idi c_i = 0; c_i < global_L; ++c_i) {
            is_visited[init_ids[c_i]] = 1;
        }

        const dataf *query_data = queries_load_ + query_id * dimension_;
#pragma omp parallel for
        for (idi v_i = 0; v_i < global_L; ++v_i) {
            idi v_id = init_ids[v_i];
            _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
        }
        // Compute the distances of all initial candidates; they all go into
        // Queue[0] first and are redistributed later for the parallel phase.
#pragma omp parallel for reduction(+ : tmp_count_computation)
        for (idi id_i = 0; id_i < global_L; ++id_i) {
            idi v_id = init_ids[id_i];
            auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
            dataf norm = *v_data++;
            ++tmp_count_computation;
            distf dist = compute_distance_with_norm(v_data, query_data, norm);
            set_L[id_i] = Candidate(v_id, dist, false); // False means not checked.
        }
        local_queues_sizes[0] = global_L;
        count_distance_computation_ += tmp_count_computation;
        tmp_count_computation = 0;
        std::sort(set_L.begin(), set_L.begin() + global_L);
    }
    time_initialization_ += WallTimer::get_time_mark();

    // Searching
    if (num_threads_ == 1) { // Single thread
        subsearch_with_top_m(
                local_M_max,
                query_id,
                local_L,
                set_L,
                0,
                local_queues_sizes[0],
                top_m_candidates_list[0],
                is_visited,
                tmp_count_computation);
        count_distance_computation_ += tmp_count_computation;
        tmp_count_computation = 0;
    } else { // Multiple threads
        const dataf *query_data = queries_load_ + query_id * dimension_;
        const idi num_queues = num_threads_;
        idi local_M = 1;
        idi iter = 0;
        std::vector<idi> ks(num_queues, 0); // Per-queue resume positions.

        time_sequential_phase_ -= WallTimer::get_time_mark();
        {// Sequential search while M < local_M_middle (small M: parallelism
         // would be dominated by load imbalance).
            idi &k = ks[0];
            while (k < global_L && local_M < local_M_middle) {
                ++iter;
                subsearch_top_m_for_one_iteration(
                        iter,
                        k,
                        local_M,
                        query_id,
                        query_data,
                        global_L,
                        set_L,
                        0,
                        local_queues_sizes[0],
                        top_m_candidates_list[0],
                        is_visited,
                        tmp_count_computation);
                count_distance_computation_ += tmp_count_computation;
                tmp_count_computation = 0;
                {// Double M
                    if (local_M < local_M_max) {
                        local_M <<= 1;
                    }
                }
            }
        }
        time_sequential_phase_ += WallTimer::get_time_mark();

        time_parallel_phase_ -= WallTimer::get_time_mark();
        // L-th distance bound from the (sorted) global queue.
        distf bound_lth = set_L[global_L - 1].distance_;
        {// Parallel search for M >= local_M_middle
            time_assign_s_ -= WallTimer::get_time_mark();
            {// Scatter elements from Queue[0] round-robin into the other
             // queues; element 0 and every num_queues-th element stay in
             // Queue[0] (compacted via dst_i).
                idi dst_i = 1;
                for (idi e_i = 1; e_i < global_L; ++e_i) {
                    idi dest_sub = e_i % num_queues;
                    if (0 == dest_sub) {
                        set_L[dst_i++] = set_L[e_i];
                    } else {
                        set_L[local_queues_starts[dest_sub] + local_queues_sizes[dest_sub]++] = set_L[e_i];
                    }
                }
                local_queues_sizes[0] = dst_i;
            }
            std::fill(ks.begin(), ks.end(), 0);
            selecting_unchecked_top_M_seq(
                    query_id,
                    iter,
                    set_L,
                    ks,
                    local_M,
                    num_queues,
                    local_queues_starts,
                    local_queues_sizes,
                    local_m_counts);
            time_assign_s_ += WallTimer::get_time_mark();

            double tmp_time_pick_top_m = 0;
            uint64_t tmp_count_add_to_queue = 0;
            uint8_t not_finished = 1;
            double tmp_time_distance_computation = 0;
            double tmp_time_add_to_queue = 0;
            while (true) {
                time_expand_ -= WallTimer::get_time_mark();
                not_finished = 0;
                ++iter;
                // reduction(| : not_finished): the previous unsynchronized
                // shared write from multiple threads was a data race; an
                // OR-reduction preserves the "any thread still working" intent.
#pragma omp parallel for reduction(+ : tmp_count_computation) \
                    reduction(+ : tmp_time_pick_top_m) \
                    reduction(+ : tmp_count_add_to_queue) \
                    reduction(+ : tmp_time_distance_computation) \
                    reduction(+ : tmp_time_add_to_queue) \
                    reduction(| : not_finished)
                for (idi q_i = 0; q_i < num_queues; ++q_i) {
                    tmp_time_pick_top_m -= WallTimer::get_time_mark();
                    // Queue[0] is the global queue and keeps capacity global_L.
                    idi L_value = q_i == 0 ? global_L : local_L;
                    idi &k = ks[q_i];
                    idi &local_queue_size = local_queues_sizes[q_i];
                    auto &local_top_m_candidates = top_m_candidates_list[q_i];
                    idi local_m_count = local_m_counts[q_i];
                    tmp_time_pick_top_m += WallTimer::get_time_mark();
                    if (!local_m_count) {
                        continue;
                    }
                    not_finished = 1;
                    const idi local_queue_start = local_queues_starts[q_i];
                    subsearch_top_m_for_one_iteration_lth_mth(
                            bound_lth,
                            iter,
                            k,
                            local_m_count,
                            query_id,
                            query_data,
                            L_value,
                            set_L,
                            local_queue_start,
                            local_queue_size,
                            local_top_m_candidates,
                            is_visited,
                            tmp_count_computation,
                            tmp_time_pick_top_m,
                            tmp_count_add_to_queue,
                            tmp_time_distance_computation,
                            tmp_time_add_to_queue);
                }
                time_add_to_queue_ += tmp_time_add_to_queue;
                tmp_time_add_to_queue = 0;
                time_distance_computation_ += tmp_time_distance_computation;
                tmp_time_distance_computation = 0;
                count_add_to_queue_ += tmp_count_add_to_queue;
                tmp_count_add_to_queue = 0;
                time_pick_top_m_ += tmp_time_pick_top_m;
                tmp_time_pick_top_m = 0;
                count_distance_computation_ += tmp_count_computation;
                tmp_count_computation = 0;
                time_expand_ += WallTimer::get_time_mark();
                if (!not_finished) {
                    break;
                }
                {// Scale M
                    if (local_M < local_M_max) {
                        local_M <<= 1;
                    }
                }
                time_select_ -= WallTimer::get_time_mark();
                // The two selections are independent, so run them concurrently.
#pragma omp parallel sections
                {
#pragma omp section
                    {// Selecting top-L and updating local_queues_sizes
                        bound_lth = selecting_top_L_seq(
                                set_L,
                                global_L,
                                num_queues,
                                local_queues_starts,
                                local_queues_sizes);
                    }
#pragma omp section
                    {// Selecting the unchecked top-M for the next iteration
                        selecting_unchecked_top_M_seq(
                                query_id,
                                iter,
                                set_L,
                                ks,
                                local_M,
                                num_queues,
                                local_queues_starts,
                                local_queues_sizes,
                                local_m_counts);
                    }
                }
                time_select_ += WallTimer::get_time_mark();
            }
        }
        time_parallel_phase_ += WallTimer::get_time_mark();
    }

    time_ending_ -= WallTimer::get_time_mark();
    {// Return the results to set_K: K-way merge of the sorted local queues,
     // skipping duplicate IDs and tie-breaking equal distances by smaller ID.
        std::vector<idi> pointer(num_threads_, 0);
        // Find the overall closest candidate among all queue heads.
        distf min_dist = FLT_MAX;
        idi min_q_i = 0;
        idi min_id = 0;
        idi min_sub = 0; // Defensive init: stays 0 if every queue is empty.
        idi last_id;
        for (int q_i = 0; q_i < num_threads_; ++q_i) {
            if (pointer[q_i] >= local_queues_sizes[q_i]) {
                continue;
            }
            idi sub = pointer[q_i] + local_queues_starts[q_i];
            distf tmp_dist = set_L[sub].distance_;
            idi tmp_id = set_L[sub].id_;
            if (tmp_dist < min_dist) {
                min_dist = tmp_dist;
                min_id = tmp_id;
                min_q_i = q_i;
                min_sub = sub;
            } else if (tmp_dist == min_dist && tmp_id < min_id) {
                min_id = tmp_id;
                min_q_i = q_i;
                min_sub = sub;
            }
        }
        set_K[0] = set_L[min_sub].id_;
        ++pointer[min_q_i];
        last_id = set_K[0];

        bool is_finished = false;
        idi k_i = 1;
        while (k_i < K && !is_finished) {
            is_finished = true;
            min_dist = FLT_MAX;
            for (int q_i = 0; q_i < num_threads_; ++q_i) {
                const idi local_queue_size = local_queues_sizes[q_i];
                idi sub = pointer[q_i] + local_queues_starts[q_i];
                // Skip duplicates of the last emitted ID (bound checked first).
                while (pointer[q_i] < local_queue_size
                        && set_L[sub].id_ == last_id) {
                    ++pointer[q_i];
                    ++sub;
                }
                if (pointer[q_i] >= local_queue_size) {
                    continue;
                }
                is_finished = false;
                distf tmp_dist = set_L[sub].distance_;
                idi tmp_id = set_L[sub].id_;
                if (tmp_dist < min_dist) {
                    min_dist = tmp_dist;
                    min_id = tmp_id;
                    min_q_i = q_i;
                    min_sub = sub;
                } else if (tmp_dist == min_dist && tmp_id < min_id) {
                    min_id = tmp_id;
                    min_q_i = q_i;
                    min_sub = sub;
                }
            }
            if (is_finished) {
                // All queues exhausted this round: without this guard the
                // stale min_sub from the previous round would be emitted again.
                break;
            }
            set_K[k_i] = set_L[min_sub].id_;
            ++pointer[min_q_i];
            ++k_i;
        }
    }
    {// Reset
        is_visited.reset();
        // Queue[0] keeps its contents for reuse; the scatter queues are emptied.
        std::fill(local_queues_sizes.begin() + 1, local_queues_sizes.end(), 0);
    }
    time_ending_ += WallTimer::get_time_mark();
}
//inline void Searching::para_search_with_top_m_subsearch_v2(
// const idi local_M_max,
// const idi query_id,
// const idi K,
// const idi global_L,
// const idi local_L,
// const idi total_L,
// const idi init_queue_end,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const std::vector<idi> &local_queues_bases,
// std::vector<idi> &local_queues_ends,
// std::vector< std::vector<idi> > &top_m_candidates_list,
// std::vector<idi> &top_m_ends,
// boost::dynamic_bitset<> &is_visited)
//{
// uint64_t tmp_count_computation = 0;
// {// Initialization
// // is_visited flag array
////#pragma omp parallel for
//// Cannot use OMP for bit array is_visited!
// for (idi c_i = 0; c_i < total_L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//#pragma omp parallel for
// for (idi v_i = 0; v_i < total_L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
//
// // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for reduction(+ : tmp_count_computation)
// for (int q_i = 0; q_i < num_threads_; ++q_i) {
// idi local_queue_base = local_queues_bases[q_i];
// idi init_ids_base = q_i * init_queue_end;
// idi init_ids_bound = init_ids_base + init_queue_end;
// for (idi id_i = init_ids_base; id_i < init_ids_bound; ++id_i) {
// idi v_id = init_ids[id_i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[local_queue_base++] = Candidate(v_id, dist, false); // False means not checked.
// }
// local_queues_ends[q_i] = init_queue_end;
// }
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//
////#pragma omp parallel for reduction(+ : tmp_count_computation)
//// for (unsigned i = 0; i < total_L; i++) {
//// unsigned v_id = init_ids[i];
//// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
//// dataf norm = *v_data++;
//// ++tmp_count_computation;
//// distf dist = compute_distance_with_norm(v_data, query_data, norm);
//// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
//// }
//// count_distance_computation_ += tmp_count_computation;
//// tmp_count_computation = 0;
// }
//
// // Searching
// if (num_threads_ == 1) { // Single threads
//// local_queues_lengths[0] = local_L;
// std::sort(
// set_L.begin(),
// set_L.end());
// subsearch_with_top_m(
// local_M_max,
// query_id,
// local_L,
// set_L,
// 0,
// local_queues_ends[0],
// top_m_candidates_list[0],
// is_visited,
// tmp_count_computation);
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
// } else { // Multiple threads
//// std::fill(local_queues_lengths.begin(), local_queues_lengths.end(), local_L);
// const dataf *query_data = queries_load_ + query_id * dimension_;
// const idi num_queues = num_threads_;
//#pragma omp parallel for
// for (idi q_i = 0; q_i < num_queues; ++q_i) {
// idi local_queue_base = local_queues_bases[q_i];
// std::sort(
// set_L.begin() + local_queue_base,
// set_L.begin() + local_queue_base + init_queue_end);
// }
// distf bound_lth = FLT_MAX;
// for (idi q_i = 0; q_i < num_queues; ++q_i) {
// bound_lth = std::min(bound_lth, set_L[local_queues_bases[q_i] + init_queue_end - 1].distance_);
// }
//// const idi local_queue_length = (L - 1) / num_queues + 1;
// std::vector<idi> ks(num_queues, 0);
// idi iter = 0;
// idi local_M = 1;
// uint8_t not_finished = 1;
// while (not_finished) {
// not_finished = 0;
// ++iter;
//#pragma omp parallel for reduction(+ : tmp_count_computation)
// for (idi q_i = 0; q_i < num_queues; ++q_i) {
// idi &k = ks[q_i];
// idi &local_queue_end = local_queues_ends[q_i];
// auto &local_top_m_candidates = top_m_candidates_list[q_i];
// if (k >= local_queue_end) {
// continue;
// }
// not_finished = 1;
// const idi local_queue_base = local_queues_bases[q_i];
//
// subsearch_top_m_for_one_iteration(
// bound_lth,
// iter,
// k,
// local_M,
// query_id,
// query_data,
// local_L,
// set_L,
// local_queue_base,
// local_queue_end,
// local_top_m_candidates,
// is_visited,
// tmp_count_computation);
// }
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
// {// Selecting and update local_queues_lengths
// time_merge_ -= WallTimer::get_time_mark();
// bound_lth = selecting_top_L_seq(
// set_L,
// global_L,
//// local_L,
// num_queues,
// local_queues_bases,
// local_queues_ends);
// time_merge_ += WallTimer::get_time_mark();
//// {// local_queues_ends
//// printf("query_id: %u "
//// "iter: %u",
////// "local_queues_ends:",
//// query_id,
//// iter);
//// for (idi q_i = 0; q_i < num_queues; ++q_i) {
//// printf(" [%u]: %u",
//// q_i,
//// local_queues_ends[q_i]);
//// }
//// printf("\n");
//// }
// }
// {// Scale M
// if (local_M < local_M_max) {
// local_M <<= 1;
// }
//// else {
//// local_M = value_M_max;
//// }
// }
// }
// }
//
// time_merge_ -= WallTimer::get_time_mark();
// {// Return the results to set_K
// std::vector<idi> pointer(num_threads_, 0);
// // get the first
// distf min_dist = FLT_MAX;
// idi min_q_i;
// idi min_sub;
// idi last_id;
// for (int q_i = 0; q_i < num_threads_; ++q_i) {
// if (pointer[q_i] >= local_queues_ends[q_i]) {
// continue;
// }
// idi sub = pointer[q_i] + local_queues_bases[q_i];
// distf tmp_dist = set_L[sub].distance_;
// if (tmp_dist < min_dist) {
// min_dist = tmp_dist;
// min_q_i = q_i;
// min_sub = sub;
// }
// }
// set_K[0] = set_L[min_sub].id_;
// ++pointer[min_q_i];
// last_id = set_K[0];
//
// bool is_finished = false;
// idi k_i = 1;
// while (k_i < K && !is_finished) {
// is_finished = true;
// min_dist = FLT_MAX;
// for (int q_i = 0; q_i < num_threads_; ++q_i) {
// const idi local_queue_end = local_queues_ends[q_i];
// if (pointer[q_i] >= local_queue_end) {
// continue;
// }
// is_finished = false;
// idi sub = pointer[q_i] + local_queues_bases[q_i];
// while (set_L[sub].id_ == last_id
// && pointer[q_i] < local_queue_end) {
// ++pointer[q_i];
// sub = pointer[q_i] + local_queues_bases[q_i];
// }
// if (pointer[q_i] >= local_queue_end) {
// continue;
// }
//// if (set_L[sub].id_ == last_id) {
//// // Duplicate
//// ++pointer[q_i];
//// continue;
//// }
// distf tmp_dist = set_L[sub].distance_;
// if (tmp_dist < min_dist) {
// min_dist = tmp_dist;
// min_q_i = q_i;
// min_sub = sub;
// }
// }
// set_K[k_i] = set_L[min_sub].id_;
// ++pointer[min_q_i];
// ++k_i;
// }
// }
// time_merge_ += WallTimer::get_time_mark();
//// {// Return the results to set_K
//// // How to deal with duplicate?
//// idi last_id = set_L[0].id_;
//// set_K[0] = last_id;
//// idi k_i = 1;
//// idi l_i = 1;
//// while (k_i < K && l_i < L) {
//// if (last_id == set_L[l_i].id_) {
//// ++l_i;
//// continue;
//// }
//// last_id = set_L[l_i++].id_;
//// set_K[k_i++] = last_id;
//// }
//// }
//
// {// Reset
//// std::fill(is_visited.begin(), is_visited.end(), 0);
// is_visited.reset();
//// is_visited.clear_all();
//// std::fill(local_queues_ends.begin(), local_queues_ends.end(), init_queue_end);
// }
//
//// {//test
//// if (3 == query_id) {
//// exit(1);
//// }
//// }
//}
/*
 * 6/27/2020-12:33
 * Do searching on the local_set_L
 * local_set_L is already sorted
 * is_visited is already set up.
 *
 * Sequential best-first (top-1) search over one contiguous segment of
 * set_L, namely set_L[base_set_L .. base_set_L + set_L_end). Repeatedly
 * selects the best unchecked candidate, expands its out-edges, computes
 * each unvisited neighbor's distance to the query, and inserts improving
 * neighbors back into the same segment via add_into_queue.
 *
 * query_id:   index of the query vector inside queries_load_.
 * local_L:    capacity of this segment (maximum queue length).
 * set_L:      global candidate array; only this segment is touched.
 * base_set_L: offset of this segment inside set_L.
 * set_L_end:  current number of valid entries in the segment; updated
 *             in place by add_into_queue as the queue fills.
 * is_visited: visited-bit array shared across segments. Bits are tested
 *             and set non-atomically here; NOTE(review): when several
 *             subsearches run concurrently this is a benign-looking
 *             test-and-set race (duplicate work only) — confirm callers
 *             rely on that.
 * local_count_distance_computation: incremented once per distance computed.
 */
inline void Searching::subsearch_for_simple_search(
        const idi query_id,
        const idi local_L,
        std::vector<Candidate> &set_L,
        const idi base_set_L,
        idi &set_L_end,
        boost::dynamic_bitset<> &is_visited,
        uint64_t &local_count_distance_computation)
{
    const dataf *query_data = queries_load_ + query_id * dimension_;
    idi k = 0; // Index of first unchecked candidate.
    idi iter = 0; // Iteration counter (kept for debugging/statistics).
    while (k < local_L) {
        ++iter;
        // Select the top-1 unchecked candidate.
        idi top_1;
        idi last_k = local_L; // Sentinel: stays local_L if nothing was selected.
        // Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
        for (idi c_i = k; c_i < set_L_end; ++c_i) {
            idi index_set_L = c_i + base_set_L;
            if (set_L[index_set_L].is_checked_) {
                continue;
            }
            top_1 = set_L[index_set_L].id_;
            last_k = c_i; // Record the location of the last candidate selected.
            set_L[index_set_L].is_checked_ = true;
            break;
        }
        if (last_k == local_L) {
            break; // Every candidate in the segment is already checked: done.
        }
        idi nk = local_L; // Lowest queue position where a new candidate was inserted.
        // Push top-1's neighbors into the queue.
        idi cand_id = top_1;
        _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
        idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
        idi out_degree = *out_edges++; // First word of the adjacency record is the degree.
        // Prefetch all neighbors' vertex records before the distance loop.
        for (idi n_i = 0; n_i < out_degree; ++n_i) {
            _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
        }
        for (idi e_i = 0; e_i < out_degree; ++e_i) {
            idi nb_id = out_edges[e_i];
            { // Sequential edition: non-atomic test-and-set of the visited bit.
                if (is_visited[nb_id]) {
                    continue;
                }
                is_visited[nb_id] = 1;
            }
            auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
            dataf norm = *nb_data++; // Vertex record starts with its norm, then the vector data.
            ++local_count_distance_computation;
            distf dist = compute_distance_with_norm(nb_data, query_data, norm);
            // Discard neighbors no better than the current tail of the segment.
            if (dist > set_L[set_L_end - 1 + base_set_L].distance_) {
                continue;
            }
            Candidate cand(nb_id, dist, false);
            // Insert into this segment's queue; r is the insertion position.
            idi r = add_into_queue(
                    set_L,
                    base_set_L,
                    set_L_end,
                    local_L,
                    cand);
            if (r < nk) {
                nk = r;
            }
        }
        // Resume scanning from the earliest position that changed.
        if (nk <= last_k) {
            k = nk;
        } else {
            k = last_k + 1;
        }
    }
}
/*
* 6/27/2020-12:26
 * Is it good to use subsearch by every thread itself?
*/
inline void Searching::para_simple_search_subsearch(
const idi query_id,
const idi K,
const idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
// std::vector<uint8_t> &is_visited)
boost::dynamic_bitset<> &is_visited)
{
uint64_t tmp_count_computation = 0;
{// Initialization
// is_visited flag array
//#pragma omp parallel for
// Cannot use OMP for bit array is_visited!
for (idi c_i = 0; c_i < L; ++c_i) {
is_visited[init_ids[c_i]] = 1;
}
const dataf *query_data = queries_load_ + query_id * dimension_;
#pragma omp parallel for
for (idi v_i = 0; v_i < L; ++v_i) {
idi v_id = init_ids[v_i];
_mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
}
// Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for
#pragma omp parallel for reduction(+ : tmp_count_computation)
for (unsigned i = 0; i < L; i++) {
unsigned v_id = init_ids[i];
auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
dataf norm = *v_data++;
++tmp_count_computation;
distf dist = compute_distance_with_norm(v_data, query_data, norm);
set_L[i] = Candidate(v_id, dist, false); // False means not checked.
}
count_distance_computation_ += tmp_count_computation;
tmp_count_computation = 0;
// std::sort(
// set_L.begin(),
// set_L.begin() + L);
}
idi queue_end = L;
// Searching
if (num_threads_ == 1) { // Single threads
std::sort(
set_L.begin(),
set_L.end());
subsearch_for_simple_search(
query_id,
L,
set_L,
0,
queue_end,
is_visited,
tmp_count_computation);
count_distance_computation_ += tmp_count_computation;
// {
//// {//test
//// for (idi i = 0; i < queue_end; ++i) {
//// printf("start: "
//// "query_id: %u "
//// "set_L[%u]: "
//// "(%u %f)\n",
//// query_id,
//// i,
//// set_L[i].id_, set_L[i].distance_);
//// }
//// }
//
// idi half_length = queue_end / 2;
// std::sort(
// set_L.begin(),
// set_L.begin() + half_length);
//// {//test
//// for (idi i = 0; i < half_length; ++i) {
//// printf("sorted: "
//// "query_id: %u "
//// "set_L[%u]: "
//// "(%u %f)\n",
//// query_id,
//// i,
//// set_L[i].id_, set_L[i].distance_);
//// }
//// }
//
// subsearch_for_simple_search(
// query_id,
// half_length, // local_L
// set_L,
// 0, // base_set_L
// half_length, // set_L_end
// is_visited,
// tmp_count_computation);
//
//// {//test
//// for (idi i = 0; i < half_length; ++i) {
//// printf("subsearched: "
//// "query_id: %u "
//// "set_L[%u]: "
//// "(%u %f)\n",
//// query_id,
//// i,
//// set_L[i].id_, set_L[i].distance_);
//// }
//// }
//
// std::sort(
// set_L.begin() + half_length,
// set_L.end());
//
//// {//test
//// for (idi i = half_length; i < queue_end; ++i) {
//// printf("sorted: "
//// "query_id: %u "
//// "set_L[%u]: "
//// "(%u %f)\n",
//// query_id,
//// i,
//// set_L[i].id_, set_L[i].distance_);
//// }
//// }
//
// subsearch_for_simple_search(
// query_id,
// half_length, // local_L
// set_L,
// half_length, // base_set_L
// half_length, // set_L_end
// is_visited,
// tmp_count_computation);
//// {//test
//// for (idi i = half_length; i < queue_end; ++i) {
//// printf("subsearched: "
//// "query_id: %u "
//// "set_L[%u]: "
//// "(%u %f)\n",
//// query_id,
//// i,
//// set_L[i].id_, set_L[i].distance_);
//// }
//// }
//// {//test
//// for (idi i = 0; i < queue_end; ++i) {
//// printf("explored: "
//// "query_id: %u "
//// "set_L[%u]: "
//// "(%u %f)\n",
//// query_id,
//// i,
//// set_L[i].id_, set_L[i].distance_);
//// }
//// }
// count_distance_computation_ += tmp_count_computation;
//
// std::vector <Candidate> tmp_set_L(L);
// std::merge(set_L.begin(), set_L.begin() + half_length,
// set_L.begin() + half_length, set_L.end(),
// tmp_set_L.begin());
// std::copy(tmp_set_L.begin(), tmp_set_L.end(), set_L.begin());
//// {//test
//// for (idi i = 0; i < queue_end; ++i) {
//// printf("merged: "
//// "query_id: %u "
//// "set_L[%u]: "
//// "(%u %f)\n",
//// query_id,
//// i,
//// set_L[i].id_, set_L[i].distance_);
//// }
//// }
// }
} else { // Multiple threads
const idi num_queues = num_threads_;
const idi local_queue_length = (L - 1) / num_queues + 1;
// Parallel for
#pragma omp parallel for reduction(+ : tmp_count_computation)
for (idi q_i = 0; q_i < num_queues; ++q_i) {
idi local_queue_base = q_i * local_queue_length;
if (local_queue_base >= L) {
continue;
}
idi local_queue_end = local_queue_length;
if (local_queue_base + local_queue_end > L) {
local_queue_end = L - local_queue_base;
}
std::sort(
set_L.begin() + local_queue_base,
set_L.begin() + local_queue_base + local_queue_end);
subsearch_for_simple_search(
query_id,
local_queue_end, // local_L
set_L,
local_queue_base, // base_set_L
local_queue_end, // set_L_end
is_visited,
tmp_count_computation);
}
count_distance_computation_ += tmp_count_computation;
// Merge
// time_merge_ -= WallTimer::get_time_mark();
merge_in_set_L(
set_L,
L,
num_queues,
local_queue_length);
// time_merge_ += WallTimer::get_time_mark();
}
{// Return the results to set_K
// How to deal with duplicate?
idi last_id = set_L[0].id_;
set_K[0] = last_id;
idi k_i = 1;
idi l_i = 1;
while (k_i < K && l_i < L) {
if (last_id == set_L[l_i].id_) {
++l_i;
continue;
}
last_id = set_L[l_i++].id_;
set_K[k_i++] = last_id;
}
//#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
//// set_K[k_i] = set_L[k_i].id_;
// }
}
{// Reset
// std::fill(is_visited.begin(), is_visited.end(), 0);
is_visited.reset();
// is_visited.clear_all();
}
// {//test
// if (0 == query_id) {
// exit(1);
// }
// }
}
///*
// * 6/22/2020-09:38
// * A synchronized last element as the sentinel
// */
//inline void Searching::para_search_with_top_m_merge_queues_global_threshold(
// const idi value_M_middle,
// const idi value_M_max,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
// std::vector<idi> &top_m_candidates,
// boost::dynamic_bitset<> &is_visited)
//{
//// const idi base_set_L = (num_threads_ - 1) * local_queue_length;
// {
//#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// uint64_t tmp_count_computation = 0;
// // Get the distances of all candidates, store in the set set_L.
////#pragma omp parallel for
//#pragma omp parallel for reduction(+ : tmp_count_computation)
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked.
// }
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//// std::sort(set_L.begin(), set_L.begin() + L);
// std::sort(
// set_L.begin() + base_set_L,
// set_L.begin() + base_set_L + L);
// local_queues_ends[num_threads_ - 1] = L;
//
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// idi M = 1;
//
// { // Single thread
// while (k < L && M < value_M_middle) {
// ++tmp_count;
//// {//test
//// printf("tmp_count: %d\n", tmp_count);
//// }
//
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// idi index_set_L = c_i + base_set_L;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[index_set_L].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
// }
//
// idi nk = L;
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L - 1 + base_set_L].distance_) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// // Thread 0 maintains the "global" queue
// idi r = add_into_queue(
// set_L,
// base_set_L,
// local_queues_ends[num_threads_ - 1],
// L,
// cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
//
// {// Scale M
// if (M < value_M_max) {
// M <<= 1;
// } else {
// M = value_M_max;
// }
// }
//
// }
// }
//
// { // Multiple Threads
// while (k < L) {
// ++tmp_count;
//// {//test
//// printf("tmp_count: %d\n", tmp_count);
//// }
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// idi index_set_L = c_i + base_set_L;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[index_set_L].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
// }
//
//
// idi nk = L;
// // Push M candidates' neighbors into the queue.
////#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(real_threads)
//#pragma omp parallel for reduction(+ : tmp_count_computation)
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// int tid = omp_get_thread_num();
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L - 1 + base_set_L].distance_) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// // Add to the local queue.
// if (0 != tid) {
// // Non-Master threads using local queues
// add_into_queue(
// set_L,
// (tid - 1) * local_queue_length,
// local_queues_ends[tid - 1],
// local_queue_length,
// cand);
// } else {
// // Thread 0 maintains the "global" queue
// idi r = add_into_queue(
// set_L,
// base_set_L,
// local_queues_ends[num_threads_ - 1],
// L,
// cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//
// {// Local queues' ends
//// printf("query%u:iter: %u", query_id, tmp_count);
// idi total_elements = 0;
// for (int i_t = 0; i_t < num_threads_ - 1; ++i_t) {
// total_elements += local_queues_ends[i_t];
// }
// number_local_elements_ += total_elements;
//// printf(" total_elements: %u+%u\n", total_elements - local_queues_ends[num_threads_ - 1], local_queues_ends[num_threads_ - 1]);
//// for (int i_t = 0; i_t < num_threads_; ++i_t) {
//// printf(" [%u]: %u", i_t, local_queues_ends[i_t]);
//// }
//// printf("\n");
// }
//
//// // Merge. Merge all queues in parallel.
// {
// time_merge_ -= WallTimer::get_time_mark();
// if (num_threads_ > 1) {
// idi r = merge_all_queues_para_array(
// set_L,
// local_queues_ends,
// local_queue_length,
// L);
// if (r < nk) {
// nk = r;
// }
// }
// time_merge_ += WallTimer::get_time_mark();
// }
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// {// Scale M
// if (M < value_M_max) {
// M <<= 1;
// } else {
// M = value_M_max;
// }
// }
//
// }
// }
//
//
//#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i + base_set_L].id_;
//// set_K[k_i] = set_L[k_i].id_;
// }
//
// {// Reset
//// std::fill(is_visited.begin(), is_visited.end(), 0);
// is_visited.reset();
//// is_visited.clear_all();
// std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
// }
//
//// {//test
//// if (0 == query_id) {
//// exit(1);
//// }
//// }
//}
///*
// * 6/7/2020-16:55
// * Use 1 threads to scale M until the value_M_middle.
// * Then use multiple threads.
// * Except for Thread 0, other threads are collectors. They collect, but do not merge.
// * Only merge once after Thread 0 stops.
// */
//inline void Searching::para_search_with_top_m_merge_queues_collectors(
// const idi value_M_middle,
// const idi value_M_max,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited)
//// std::vector<distf> &local_thresholds)
//// BitVector &is_visited)
//{
// {
//#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
//// is_visited.atomic_set_bit(init_ids[c_i]);
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// uint64_t tmp_count_computation = 0;
// // Get the distances of all candidates, store in the set set_L.
////#pragma omp parallel for
//#pragma omp parallel for reduction(+ : tmp_count_computation)
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked.
//// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//// std::sort(set_L.begin(), set_L.begin() + L);
// std::sort(
// set_L.begin() + base_set_L,
// set_L.begin() + base_set_L + L);
//// boost::sort::block_indirect_sort(
//// set_L.begin() + base_set_L,
//// set_L.begin() + base_set_L + L,
//// num_threads_);
// local_queues_ends[num_threads_ - 1] = L;
//
//// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// idi M = 1;
//
// // Single thread
// {
// while (k < L && M < value_M_middle) {
// ++tmp_count;
//// {//test
//// printf("tmp_count: %d\n", tmp_count);
//// }
//
//// int real_threads = std::min(static_cast<int>(M), num_threads_);
//// idi queue_base = num_threads_ - real_threads;
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// idi index_set_L = c_i + base_set_L;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[index_set_L].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
// }
//
// idi nk = L;
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L - 1 + base_set_L].distance_) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// // Thread 0 maintains the "global" queue
// idi r = add_into_queue(
// set_L,
// base_set_L,
// local_queues_ends[num_threads_ - 1],
// L,
// cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
//
// {// Scale M
// if (M < value_M_max) {
// M <<= 1;
// } else {
// M = value_M_max;
// }
// }
// }
// }
//
// // Multiple Threads
// {
//// while (k < L/num_threads_/2) {
// while (k < L) {
// ++tmp_count;
//// {//test
//// printf("tmp_count: %d\n", tmp_count);
//// }
//// int real_threads = std::min(static_cast<int>(M), num_threads_);
//// idi queue_base = num_threads_ - real_threads;
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// idi index_set_L = c_i + base_set_L;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[index_set_L].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
// }
//
// idi chunk_size;
// if (num_threads_ <= top_m_candidates_end) {
// chunk_size = (top_m_candidates_end - 1) / num_threads_ + 1;
// } else {
// chunk_size = 1;
// }
// idi nk = L;
// // Push M candidates' neighbors into the queue.
////#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(real_threads)
////#pragma omp parallel for reduction(+ : tmp_count_computation)
//#pragma omp parallel for reduction(+ : tmp_count_computation) schedule(static, chunk_size)
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// int tid = omp_get_thread_num();
//// {
//// if (c_i < chunk_size && tid != 0) {
//// printf("query_id: %u "
//// "tmp_count: %u "
//// "chunk_size: %u "
//// "c_i: %u "
//// "tid: %u\n",
//// query_id,
//// tmp_count,
//// chunk_size,
//// c_i,
//// tid);
//// }
//// }
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L - 1 + base_set_L].distance_) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// // Add to the local queue.
// if (0 != tid) {
// // Non-Master threads using local queues
// add_into_queue(
// set_L,
// (tid - 1) * local_queue_length,
// local_queues_ends[tid - 1],
// local_queue_length,
// cand);
// } else {
// // Thread 0 maintains the "global" queue
// idi r = add_into_queue(
// set_L,
// base_set_L,
// local_queues_ends[num_threads_ - 1],
// L,
// cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//
////// // Merge. Merge all queues in parallel.
//// {
//// time_merge_ -= WallTimer::get_time_mark();
//// if (num_threads_ > 1) {
////// idi r = merge_all_queues_queue_base(
////// set_L,
////// local_queues_ends,
////// queue_base,
////// real_threads,
////// local_queue_length,
////// L);
//// idi r = merge_all_queues_para_array(
//// set_L,
//// local_queues_ends,
//// local_queue_length,
//// L);
//// if (r < nk) {
//// nk = r;
//// }
//// }
//// time_merge_ += WallTimer::get_time_mark();
//// }
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// {// Scale M
// if (M < value_M_max) {
// M <<= 1;
// } else {
// M = value_M_max;
// }
// }
// }
//
//// // Merge only once after Master Thread stops.
//// {
//// time_merge_ -= WallTimer::get_time_mark();
//// if (num_threads_ > 1) {
////// idi r = merge_all_queues_queue_base(
////// set_L,
////// local_queues_ends,
////// queue_base,
////// real_threads,
////// local_queue_length,
////// L);
//// merge_all_queues_para_array(
//// set_L,
//// local_queues_ends,
//// local_queue_length,
//// L);
//// }
//// time_merge_ += WallTimer::get_time_mark();
//// }
// }
//
//
//#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i + base_set_L].id_;
//// set_K[k_i] = set_L[k_i].id_;
// }
//
// {// Reset
//// std::fill(is_visited.begin(), is_visited.end(), 0);
// is_visited.reset();
//// is_visited.clear_all();
// std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
// }
//
//// {//test
//// printf("tmp_count: %u\n", tmp_count);
//// if (3 == query_id) {
//// exit(1);
//// }
//// }
//}
///*
// * 6/8/2020-16:39
// * Selecting rather than merging
// */
//inline void Searching::para_search_with_top_m_merge_queues_selecting(
// const idi value_M_middle,
// const idi value_M_max,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited)
//{
// {
//#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
//// is_visited.atomic_set_bit(init_ids[c_i]);
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// uint64_t tmp_count_computation = 0;
// // Get the distances of all candidates, store in the set set_L.
////#pragma omp parallel for
//#pragma omp parallel for reduction(+ : tmp_count_computation)
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked.
//// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//// std::sort(set_L.begin(), set_L.begin() + L);
// std::sort(
// set_L.begin() + base_set_L,
// set_L.begin() + base_set_L + L);
//// boost::sort::block_indirect_sort(
//// set_L.begin() + base_set_L,
//// set_L.begin() + base_set_L + L,
//// num_threads_);
// local_queues_ends[num_threads_ - 1] = L;
//
//// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// idi M = 1;
//
// // Single thread
// {
// while (k < L && M < value_M_middle) {
// ++tmp_count;
//// {//test
//// printf("tmp_count: %d\n", tmp_count);
//// }
//
//// int real_threads = std::min(static_cast<int>(M), num_threads_);
//// idi queue_base = num_threads_ - real_threads;
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// idi index_set_L = c_i + base_set_L;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[index_set_L].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
// }
//
// idi nk = L;
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L - 1 + base_set_L].distance_) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// // Thread 0 maintains the "global" queue
// idi r = add_into_queue(
// set_L,
// base_set_L,
// local_queues_ends[num_threads_ - 1],
// L,
// cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
//
// {// Scale M
// if (M < value_M_max) {
// M <<= 1;
// } else {
// M = value_M_max;
// }
// }
// }
// }
//
// // Multiple Threads
// {
//// while (k < L/num_threads_/2) {
//// while (k < L) {
// while (true) {
// ++tmp_count;
//// {//test
//// printf("tmp_count: %d\n", tmp_count);
//// }
//// // Select M candidates
//// idi last_k = L;
////// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
//// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
//// idi index_set_L = c_i + base_set_L;
//// if (set_L[index_set_L].is_checked_) {
//// continue;
//// }
//// last_k = c_i; // Record the location of the last candidate selected.
//// set_L[index_set_L].is_checked_ = true;
//// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
//// }
//
// // Select M candidates
// {
// idi traverse_count = 0;
// idi bound_sub = L; // This is not always true!
// for (idi sub = 0; sub < bound_sub && top_m_candidates_end < M && traverse_count < L; ++sub) {
// for (int tid = 0; tid < num_threads_ && top_m_candidates_end < M && traverse_count < L; ++tid) {
// if (sub >= local_queues_ends[tid]) {
// continue;
// }
// idi index_set_L = tid * local_queue_length + sub;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// set_L[index_set_L].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
// }
// }
//
// if (0 == top_m_candidates_end) {
// break;
// }
// }
//
//// idi nk = L;
// // Push M candidates' neighbors into the queue.
////#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(real_threads)
//#pragma omp parallel for reduction(+ : tmp_count_computation)
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// int tid = omp_get_thread_num();
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L - 1 + base_set_L].distance_) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// // Add to the local queue.
// if (0 != tid) {
// // Non-Master threads using local queues
// add_into_queue(
// set_L,
// (tid - 1) * local_queue_length,
// local_queues_ends[tid - 1],
// local_queue_length,
// cand);
// } else {
// // Thread 0 maintains the "global" queue
//// idi r =
// add_into_queue(
// set_L,
// base_set_L,
// local_queues_ends[num_threads_ - 1],
// L,
// cand);
//// if (r < nk) {
//// nk = r;
//// }
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//
//// // Merge. Merge all queues in parallel.
// {
// time_merge_ -= WallTimer::get_time_mark();
// if (num_threads_ > 1) {
//// idi r = merge_all_queues_queue_base(
//// set_L,
//// local_queues_ends,
//// queue_base,
//// real_threads,
//// local_queue_length,
//// L);
//// idi r =
// merge_all_queues_para_array(
// set_L,
// local_queues_ends,
// local_queue_length,
// L);
//// if (r < nk) {
//// nk = r;
//// }
// }
// time_merge_ += WallTimer::get_time_mark();
// }
//// if (nk <= last_k) {
//// k = nk;
//// } else {
//// k = last_k + 1;
//// }
// {// Scale M
// if (M < value_M_max) {
// M <<= 1;
// } else {
// M = value_M_max;
// }
// }
// }
// }
//
//
////#pragma omp parallel for
//// for (idi k_i = 0; k_i < K; ++k_i) {
//// set_K[k_i] = set_L[k_i + base_set_L].id_;
////// set_K[k_i] = set_L[k_i].id_;
//// }
//
// {
// idi k_i = 0;
// idi bound_sub = K / num_threads_;
// for (idi sub = 0; sub < bound_sub; ++sub) {
// for (int tid = 0; tid < num_threads_; ++tid) {
// idi index_set_L = tid * local_queue_length + sub;
// set_K[k_i++] = set_L[index_set_L].id_;
// }
// }
// idi remain = K - k_i;
// if (remain) {
// for (int tid = 0; tid < remain; ++tid) {
// idi index_set_L = tid * local_queue_length + bound_sub;
// set_K[k_i++] = set_L[index_set_L].id_;
// }
// }
// }
//
// {// Reset
//// std::fill(is_visited.begin(), is_visited.end(), 0);
// is_visited.reset();
//// is_visited.clear_all();
// std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
// }
//
//// {//test
//// printf("tmp_count: %u\n", tmp_count);
//// if (3 == query_id) {
//// exit(1);
//// }
//// }
//}
} // namespace PANNS
#endif //BATCH_SEARCHING_SEARCHING_H
|
ztrmm.c | #include "blas.h"
#include "error.h"
#include <stdio.h>
#include "handle.h"
#include "config.h"
#include "ztrmm.fatbin.c"
/* Return the smaller of two sizes (equal operands return that value). */
static inline size_t min(size_t a, size_t b) {
  if (b < a)
    return b;
  return a;
}
/* Return the larger of two sizes (equal operands return that value). */
static inline size_t max(size_t a, size_t b) {
  if (b > a)
    return b;
  return a;
}
/* Queue an asynchronous 2-D copy of an m-by-n sub-matrix of elemSize-byte
 * elements from host matrix B (leading dimension ldb, origin (bi,bj)) into
 * device matrix A (leading dimension lda, origin (ai,aj)) on `stream`.
 *
 * The CUDA_MEMCPY2D initializer is positional and must follow the driver
 * API field order: srcXInBytes, srcY, srcMemoryType, srcHost, srcDevice,
 * srcArray (unused, 0), srcPitch, then the corresponding seven dst fields,
 * then WidthInBytes and Height.  NOTE(review): confirm the field order
 * against the cuda.h shipped with the targeted driver version.
 *
 * Fix: the call had been corrupted to "cuMemcpy2DAsync(©, stream)" by an
 * HTML-entity mangling of "&copy"; restored to pass the struct's address.
 */
static inline CUresult cuMemcpyHtoD2DAsync(CUdeviceptr A, size_t lda, size_t ai, size_t aj,
                                           const void * B, size_t ldb, size_t bi, size_t bj,
                                           size_t m, size_t n, size_t elemSize, CUstream stream) {
  CUDA_MEMCPY2D copy = {
    bi * elemSize, bj, CU_MEMORYTYPE_HOST, B, 0, 0, ldb * elemSize,
    ai * elemSize, aj, CU_MEMORYTYPE_DEVICE, NULL, A, 0, lda * elemSize,
    m * elemSize, n };
  return cuMemcpy2DAsync(&copy, stream);
}
/* Queue an asynchronous 2-D copy of an m-by-n sub-matrix of elemSize-byte
 * elements from device matrix B (leading dimension ldb, origin (bi,bj))
 * into host matrix A (leading dimension lda, origin (ai,aj)) on `stream`.
 *
 * Positional CUDA_MEMCPY2D initializer: seven src fields (host pointer slot
 * NULL, device pointer B), seven dst fields (host pointer A), then
 * WidthInBytes and Height.  NOTE(review): confirm field order against
 * the installed cuda.h.
 *
 * Fix: restored "&copy" which had been HTML-entity-mangled to "©".
 */
static inline CUresult cuMemcpyDtoH2DAsync(void * A, size_t lda, size_t ai, size_t aj,
                                           CUdeviceptr B, size_t ldb, size_t bi, size_t bj,
                                           size_t m, size_t n, size_t elemSize, CUstream stream) {
  CUDA_MEMCPY2D copy = {
    bi * elemSize, bj, CU_MEMORYTYPE_DEVICE, NULL, B, 0, ldb * elemSize,
    ai * elemSize, aj, CU_MEMORYTYPE_HOST, A, 0, 0, lda * elemSize,
    m * elemSize, n };
  return cuMemcpy2DAsync(&copy, stream);
}
/* Queue an asynchronous device-to-device 2-D copy of an m-by-n sub-matrix
 * of elemSize-byte elements from B (leading dimension ldb, origin (bi,bj))
 * into A (leading dimension lda, origin (ai,aj)) on `stream`.
 *
 * Positional CUDA_MEMCPY2D initializer: both memory types are
 * CU_MEMORYTYPE_DEVICE with the host-pointer slots NULL.  NOTE(review):
 * confirm field order against the installed cuda.h.
 *
 * Fix: restored "&copy" which had been HTML-entity-mangled to "©".
 */
static inline CUresult cuMemcpyDtoD2DAsync(CUdeviceptr A, size_t lda, size_t ai, size_t aj,
                                           CUdeviceptr B, size_t ldb, size_t bi, size_t bj,
                                           size_t m, size_t n, size_t elemSize, CUstream stream) {
  CUDA_MEMCPY2D copy = {
    bi * elemSize, bj, CU_MEMORYTYPE_DEVICE, NULL, B, 0, ldb * elemSize,
    ai * elemSize, aj, CU_MEMORYTYPE_DEVICE, NULL, A, 0, lda * elemSize,
    m * elemSize, n };
  return cuMemcpy2DAsync(&copy, stream);
}
/* Complex constants used by the quick-return test (alpha == zero) and the
 * diagonal-scale test (temp != one) in the routines below. */
static const double complex zero = 0.0 + 0.0 * I;
static const double complex one = 1.0 + 0.0 * I;
/* ZTRMM: in-place triangular matrix-matrix multiply.
 *
 *   B := alpha * op(A) * B    (side == CBlasLeft)
 *   B := alpha * B * op(A)    (side == CBlasRight)
 *
 * op(A) is A (CBlasNoTrans), A**T (CBlasTrans) or A**H (any other value,
 * handled via conj()).  A is an upper or lower triangular matrix whose
 * diagonal is implicitly one when diag == CBlasUnit; B is m-by-n.  Both
 * are column-major with leading dimensions lda and ldb.  Mirrors the
 * reference BLAS ZTRMM, including its error reporting via XERBLA.
 */
void ztrmm(CBlasSide side, CBlasUplo uplo, CBlasTranspose trans, CBlasDiag diag,
           size_t m, size_t n,
           double complex alpha, const double complex * restrict A, size_t lda,
           double complex * restrict B, size_t ldb) {
  /* op(A) is m-by-m when applied from the left, n-by-n from the right. */
  const size_t nRowA = (side == CBlasLeft) ? m : n;

  /* Argument checks; info codes are the 1-based argument positions of the
   * Fortran ZTRMM calling sequence. */
  int info = 0;
  if (lda < nRowA)
    info = 9;
  else if (ldb < m)
    info = 11;
  if (info != 0) {
    XERBLA(info);
    return;
  }

  /* Quick return when B is empty. */
  if (m == 0 || n == 0)
    return;

  /* alpha == 0: B := 0 without reading A at all. */
  if (alpha == zero) {
#pragma omp parallel for
    for (size_t j = 0; j < n; j++) {
      for (size_t i = 0; i < m; i++)
        B[j * ldb + i] = zero;
    }
    return;
  }

  if (side == CBlasLeft) {
    if (trans == CBlasNoTrans) {
      /* B := alpha*A*B.  Columns of B are independent, so each left-side
       * case parallelises over j. */
      if (uplo == CBlasUpper) {
#pragma omp parallel for
        for (size_t j = 0; j < n; j++) {
          /* Ascending k: entries B[j][i] with i < k still hold values that
           * have not yet been overwritten by their own k-step. */
          for (size_t k = 0; k < m; k++) {
            if (B[j * ldb + k] != zero) {
              register double complex temp = alpha * B[j * ldb + k];
              for (size_t i = 0; i < k; i++)
                B[j * ldb + i] += temp * A[k * lda + i];
              if (diag == CBlasNonUnit) temp *= A[k * lda + k];
              B[j * ldb + k] = temp;
            }
          }
        }
      }
      else {
#pragma omp parallel for
        for (size_t j = 0; j < n; j++) {
          /* Lower triangular: walk k downwards; the do/while with a
           * post-decrement test handles the unsigned wrap of k at zero. */
          size_t k = m - 1;
          do {
            if (B[j * ldb + k] != zero) {
              register double complex temp = alpha * B[j * ldb + k];
              B[j * ldb + k] = temp;
              if (diag == CBlasNonUnit) B[j * ldb + k] *= A[k * lda + k];
              for (size_t i = k + 1; i < m; i++)
                B[j * ldb + i] += temp * A[k * lda + i];
            }
          } while (k-- > 0);
        }
      }
    }
    else {
      /* B := alpha*A**T*B (CBlasTrans) or alpha*A**H*B (otherwise). */
      if (uplo == CBlasUpper) {
#pragma omp parallel for
        for (size_t j = 0; j < n; j++) {
          /* Descending i: row i of op(A) only reads B[j][k] with k <= i,
           * which have not been overwritten yet. */
          size_t i = m - 1;
          do {
            register double complex temp = B[j * ldb + i];
            if (trans == CBlasTrans) {
              if (diag == CBlasNonUnit) temp *= A[i * lda + i];
              for (size_t k = 0; k < i; k++)
                temp += A[i * lda + k] * B[j * ldb + k];
            }
            else {
              /* Conjugate-transpose variant. */
              if (diag == CBlasNonUnit) temp *= conj(A[i * lda + i]);
              for (size_t k = 0; k < i; k++)
                temp += conj(A[i * lda + k]) * B[j * ldb + k];
            }
            B[j * ldb + i] = alpha * temp;
          } while (i-- > 0);
        }
      }
      else {
#pragma omp parallel for
        for (size_t j = 0; j < n; j++) {
          /* Lower triangular transpose: ascending i reads only k > i. */
          for (size_t i = 0; i < m; i++) {
            register double complex temp = B[j * ldb + i];
            if (trans == CBlasTrans) {
              if (diag == CBlasNonUnit) temp *= A[i * lda + i];
              for (size_t k = i + 1; k < m; k++)
                temp += A[i * lda + k] * B[j * ldb + k];
            }
            else {
              if (diag == CBlasNonUnit) temp *= conj(A[i * lda + i]);
              for (size_t k = i + 1; k < m; k++)
                temp += conj(A[i * lda + k]) * B[j * ldb + k];
            }
            B[j * ldb + i] = alpha * temp;
          }
        }
      }
    }
  }
  else {
    if (trans == CBlasNoTrans) {
      /* B := alpha*B*A.  Columns of B now combine with each other, so the
       * right-side cases run sequentially over j/k. */
      if (uplo == CBlasUpper) {
        /* Descending j: column j is finalized before earlier columns,
         * which it reads, are scaled. */
        size_t j = n - 1;
        do {
          register double complex temp = alpha;
          if (diag == CBlasNonUnit) temp *= A[j * lda + j];
          for (size_t i = 0; i < m; i++)
            B[j * ldb + i] *= temp;
          for (size_t k = 0; k < j; k++) {
            if (A[j * lda + k] != zero) {
              register double complex temp = alpha * A[j * lda + k];
              for (size_t i = 0; i < m; i++)
                B[j * ldb + i] += temp * B[k * ldb + i];
            }
          }
        } while (j-- > 0);
      }
      else {
        for (size_t j = 0; j < n; j++) {
          register double complex temp = alpha;
          if (diag == CBlasNonUnit) temp *= A[j * lda + j];
          for (size_t i = 0; i < m; i++)
            B[j * ldb + i] *= temp;
          for (size_t k = j + 1; k < n; k++) {
            if (A[j * lda + k] != zero) {
              register double complex temp = alpha * A[j * lda + k];
              for (size_t i = 0; i < m; i++)
                B[j * ldb + i] += temp * B[k * ldb + i];
            }
          }
        }
      }
    }
    else {
      /* B := alpha*B*A**T (CBlasTrans) or alpha*B*A**H (otherwise). */
      if (uplo == CBlasUpper) {
        /* Ascending k: off-diagonal contributions from column k are added
         * to earlier columns before column k itself is scaled. */
        for (size_t k = 0; k < n; k++) {
          for (size_t j = 0; j < k; j++) {
            if (A[k * lda + j] != zero) {
              register double complex temp;
              if (trans == CBlasTrans)
                temp = alpha * A[k * lda + j];
              else
                temp = alpha * conj(A[k * lda + j]);
              for (size_t i = 0; i < m; i++)
                B[j * ldb + i] += temp * B[k * ldb + i];
            }
          }
          register double complex temp = alpha;
          if (diag == CBlasNonUnit)
            temp *= ((trans == CBlasTrans) ? A[k * lda + k] : conj(A[k * lda + k]));
          /* In-place update: skipping the scale when temp == 1 is safe
           * because column k already holds its final value. */
          if (temp != one) {
            for (size_t i = 0; i < m; i++)
              B[k * ldb + i] = temp * B[k * ldb + i];
          }
        }
      }
      else {
        size_t k = n - 1;
        do {
          for (size_t j = k + 1; j < n; j++) {
            if (A[k * lda + j] != zero) {
              register double complex temp;
              if (trans == CBlasTrans)
                temp = alpha * A[k * lda + j];
              else
                temp = alpha * conj(A[k * lda + j]);
              for (size_t i = 0; i < m; i++)
                B[j * ldb + i] += temp * B[k * ldb + i];
            }
          }
          register double complex temp = alpha;
          if (diag == CBlasNonUnit)
            temp *= ((trans == CBlasTrans) ? A[k * lda + k] : conj(A[k * lda + k]));
          if (temp != one) {
            for (size_t i = 0; i < m; i++)
              B[k * ldb + i] = temp * B[k * ldb + i];
          }
        } while (k-- > 0);
      }
    }
  }
}
/* ZTRMM2: out-of-place triangular matrix-matrix multiply.
 *
 *   X := alpha * op(A) * B    (side == CBlasLeft)
 *   X := alpha * B * op(A)    (side == CBlasRight)
 *
 * op(A) is A (CBlasNoTrans), A**T (CBlasTrans) or A**H (any other value,
 * via conj()).  A is upper or lower triangular (unit diagonal when
 * diag == CBlasUnit); B and X are m-by-n, column-major, with leading
 * dimensions ldb and ldx.  B is read-only and every element of X is
 * written on every code path.
 *
 * Bug fix: in the side == CBlasRight, trans != CBlasNoTrans cases the old
 * code wrote column k of X only when the diagonal scale differed from one
 * (a guard copy-pasted from the in-place ztrmm).  With alpha == 1 and a
 * unit diagonal, X's columns were never initialized from B, and the
 * off-diagonal loops then accumulated into indeterminate memory.  X is
 * now always initialized, matching every other branch of this routine.
 */
void ztrmm2(CBlasSide side, CBlasUplo uplo, CBlasTranspose trans, CBlasDiag diag,
            size_t m, size_t n,
            double complex alpha, const double complex * restrict A, size_t lda,
            const double complex * restrict B, size_t ldb,
            double complex * restrict X, size_t ldx) {
  /* op(A) is m-by-m when applied from the left, n-by-n from the right. */
  const size_t nRowA = (side == CBlasLeft) ? m : n;

  /* Argument checks; info codes are 1-based argument positions. */
  int info = 0;
  if (lda < nRowA)
    info = 9;
  else if (ldb < m)
    info = 11;
  else if (ldx < m)
    info = 13;
  if (info != 0) {
    XERBLA(info);
    return;
  }

  /* Quick return. */
  if (m == 0 || n == 0)
    return;

  /* alpha == 0: X := 0 without reading A or B. */
  if (alpha == zero) {
#pragma omp parallel for
    for (size_t j = 0; j < n; j++) {
      for (size_t i = 0; i < m; i++)
        X[j * ldx + i] = zero;
    }
    return;
  }

  if (side == CBlasLeft) {
    if (trans == CBlasNoTrans) {
      /* X := alpha*A*B; columns are independent, parallelise over j. */
      if (uplo == CBlasUpper) {
#pragma omp parallel for
        for (size_t j = 0; j < n; j++) {
          for (size_t k = 0; k < m; k++) {
            register double complex temp = B[j * ldb + k];
            if (temp != zero) {
              temp *= alpha;
              for (size_t i = 0; i < k; i++)
                X[j * ldx + i] += temp * A[k * lda + i];
              if (diag == CBlasNonUnit) temp *= A[k * lda + k];
            }
            /* X[j][k] is assigned at step k, before any later step
             * accumulates off-diagonal terms into it. */
            X[j * ldx + k] = temp;
          }
        }
      }
      else {
#pragma omp parallel for
        for (size_t j = 0; j < n; j++) {
          size_t k = m - 1;
          do {
            if (B[j * ldb + k] != zero) {
              register double complex temp = alpha * B[j * ldb + k];
              X[j * ldx + k] = temp;
              if (diag == CBlasNonUnit) X[j * ldx + k] *= A[k * lda + k];
              for (size_t i = k + 1; i < m; i++)
                X[j * ldx + i] += temp * A[k * lda + i];
            }
            else
              /* Zero source entry: still initialize the output slot. */
              X[j * ldx + k] = B[j * ldb + k];
          } while (k-- > 0);
        }
      }
    }
    else {
      /* X := alpha*A**T*B or alpha*A**H*B. */
      if (uplo == CBlasUpper) {
#pragma omp parallel for
        for (size_t j = 0; j < n; j++) {
          size_t i = m - 1;
          do {
            register double complex temp = B[j * ldb + i];
            if (trans == CBlasTrans) {
              if (diag == CBlasNonUnit) temp *= A[i * lda + i];
              for (size_t k = 0; k < i; k++)
                temp += A[i * lda + k] * B[j * ldb + k];
            }
            else {
              /* Conjugate-transpose variant. */
              if (diag == CBlasNonUnit) temp *= conj(A[i * lda + i]);
              for (size_t k = 0; k < i; k++)
                temp += conj(A[i * lda + k]) * B[j * ldb + k];
            }
            X[j * ldx + i] = alpha * temp;
          } while (i-- > 0);
        }
      }
      else {
#pragma omp parallel for
        for (size_t j = 0; j < n; j++) {
          for (size_t i = 0; i < m; i++) {
            register double complex temp = B[j * ldb + i];
            if (trans == CBlasTrans) {
              if (diag == CBlasNonUnit) temp *= A[i * lda + i];
              for (size_t k = i + 1; k < m; k++)
                temp += A[i * lda + k] * B[j * ldb + k];
            }
            else {
              if (diag == CBlasNonUnit) temp *= conj(A[i * lda + i]);
              for (size_t k = i + 1; k < m; k++)
                temp += conj(A[i * lda + k]) * B[j * ldb + k];
            }
            X[j * ldx + i] = alpha * temp;
          }
        }
      }
    }
  }
  else {
    if (trans == CBlasNoTrans) {
      /* X := alpha*B*A; columns of X combine several columns of B, so the
       * right-side cases run sequentially. */
      if (uplo == CBlasUpper) {
        size_t j = n - 1;
        do {
          register double complex temp = alpha;
          if (diag == CBlasNonUnit) temp *= A[j * lda + j];
          /* Initialize column j, then accumulate off-diagonal terms. */
          for (size_t i = 0; i < m; i++)
            X[j * ldx + i] = temp * B[j * ldb + i];
          for (size_t k = 0; k < j; k++) {
            if (A[j * lda + k] != zero) {
              register double complex temp = alpha * A[j * lda + k];
              for (size_t i = 0; i < m; i++)
                X[j * ldx + i] += temp * B[k * ldb + i];
            }
          }
        } while (j-- > 0);
      }
      else {
        for (size_t j = 0; j < n; j++) {
          register double complex temp = alpha;
          if (diag == CBlasNonUnit) temp *= A[j * lda + j];
          for (size_t i = 0; i < m; i++)
            X[j * ldx + i] = temp * B[j * ldb + i];
          for (size_t k = j + 1; k < n; k++) {
            if (A[j * lda + k] != zero) {
              register double complex temp = alpha * A[j * lda + k];
              for (size_t i = 0; i < m; i++)
                X[j * ldx + i] += temp * B[k * ldb + i];
            }
          }
        }
      }
    }
    else {
      /* X := alpha*B*A**T or alpha*B*A**H. */
      if (uplo == CBlasUpper) {
        /* Ascending k: column k of X is initialized below at step k,
         * before any later step accumulates into it (j < k loops only
         * touch columns j < k). */
        for (size_t k = 0; k < n; k++) {
          for (size_t j = 0; j < k; j++) {
            if (A[k * lda + j] != zero) {
              register double complex temp;
              if (trans == CBlasTrans)
                temp = alpha * A[k * lda + j];
              else
                temp = alpha * conj(A[k * lda + j]);
              for (size_t i = 0; i < m; i++)
                X[j * ldx + i] += temp * B[k * ldb + i];
            }
          }
          register double complex temp = alpha;
          if (diag == CBlasNonUnit)
            temp *= ((trans == CBlasTrans) ? A[k * lda + k] : conj(A[k * lda + k]));
          /* FIX: unlike the in-place ztrmm, X must be written even when
           * the diagonal scale is one; the plain copy branch also avoids
           * a complex multiply (and its inf/NaN edge cases). */
          if (temp != one) {
            for (size_t i = 0; i < m; i++)
              X[k * ldx + i] = temp * B[k * ldb + i];
          }
          else {
            for (size_t i = 0; i < m; i++)
              X[k * ldx + i] = B[k * ldb + i];
          }
        }
      }
      else {
        /* Descending k mirrors the upper case: the j > k loops only
         * touch columns already initialized at earlier (larger-k) steps. */
        size_t k = n - 1;
        do {
          for (size_t j = k + 1; j < n; j++) {
            if (A[k * lda + j] != zero) {
              register double complex temp;
              if (trans == CBlasTrans)
                temp = alpha * A[k * lda + j];
              else
                temp = alpha * conj(A[k * lda + j]);
              for (size_t i = 0; i < m; i++)
                X[j * ldx + i] += temp * B[k * ldb + i];
            }
          }
          register double complex temp = alpha;
          if (diag == CBlasNonUnit)
            temp *= ((trans == CBlasTrans) ? A[k * lda + k] : conj(A[k * lda + k]));
          if (temp != one) {
            for (size_t i = 0; i < m; i++)
              X[k * ldx + i] = temp * B[k * ldb + i];
          }
          else {
            /* FIX: same initialization fix as the upper-triangular case. */
            for (size_t i = 0; i < m; i++)
              X[k * ldx + i] = B[k * ldb + i];
          }
        } while (k-- > 0);
      }
    }
  }
}
/* cuZtrmm2: single-GPU out-of-place ZTRMM, X := alpha*op(A)*B or
 * alpha*B*op(A), launched asynchronously on `stream`.
 *
 * The kernel module is loaded lazily from the embedded fatbin on first
 * use and cached in the handle.  The kernel to launch is selected by
 * reconstructing its C++-mangled name from the template parameters
 * (side, uplo, trans, diag and the block sizes mb/nb/kb/bx/by).
 */
CUresult cuZtrmm2(CUBLAShandle handle,
                  CBlasSide side, CBlasUplo uplo, CBlasTranspose trans, CBlasDiag diag,
                  size_t m, size_t n,
                  double complex alpha, CUdeviceptr A, size_t lda, CUdeviceptr B, size_t ldb,
                  CUdeviceptr X, size_t ldx, CUstream stream) {
  const size_t nRowA = (side == CBlasLeft) ? m : n;

  /* Same argument checks as the host ztrmm2. */
  int info = 0;
  if (lda < nRowA)
    info = 9;
  else if (ldb < m)
    info = 11;
  else if (ldx < m)
    info = 13;
  if (info != 0) {
    XERBLA(info);
    return CUDA_ERROR_INVALID_VALUE;
  }

  /* Quick return. */
  if (m == 0 || n == 0)
    return CUDA_SUCCESS;

  CU_ERROR_CHECK(cuCtxPushCurrent(handle->context));

  /* Lazily load the kernel module from the embedded fatbin image. */
  if (handle->ztrmm2 == NULL)
    CU_ERROR_CHECK(cuModuleLoadData(&handle->ztrmm2, imageBytes));

  /* Tile/block sizes; the non-transposed kernels use a different shape
   * than the (conjugate-)transposed ones. */
  const unsigned int mb = (trans == CBlasNoTrans) ? 64 : 8;
  const unsigned int nb = (trans == CBlasNoTrans) ? 4 : 8;
  const unsigned int kb = (trans == CBlasNoTrans) ? 16 : 4;
  const unsigned int bx = (trans == CBlasNoTrans) ? 16 : 4;
  const unsigned int by = (trans == CBlasNoTrans) ? 4 : 8;

  /* Build the mangled template-instantiation name of the kernel.  The
   * enum arguments are formatted as characters/ints exactly as the
   * mangler emits them; the two snprintf size limits (78 and 95) bound
   * the respective name patterns within name[95]. */
  char name[95];
  if (trans == CBlasNoTrans)
    snprintf(name, 78,
             "_Z8ztrmm%c%c%cIL9CBlasDiag%dELj%uELj%uELj%uELj%uELj%uEEv7double2PKS1_S3_PS1_iiiii",
             side, uplo, trans, diag, mb, nb, kb, bx, by);
  else
    snprintf(name, 95,
             "_Z8ztrmm%c%cTIL14CBlasTranspose%dEL9CBlasDiag%dELj%uELj%uELj%uELj%uELj%uEEv7double2PKS2_S4_PS2_iiiii",
             side, uplo, trans, diag, mb, nb, kb, bx, by);

  CUfunction function;
  CU_ERROR_CHECK(cuModuleGetFunction(&function, handle->ztrmm2, name));

  /* Kernel parameters in the order of the kernel signature.  NOTE(review):
   * m, n and the leading dimensions are size_t here while the mangled
   * name advertises int parameters ("iiiii"); the driver reads
   * sizeof(int) bytes from each slot -- presumably fine on the targeted
   * little-endian platforms for values < 2^31, confirm. */
  void * params[] = { &alpha, &A, &B, &X, &lda, &ldb, &ldx, &m, &n };

  /* One thread block per mb-by-nb tile of X. */
  CU_ERROR_CHECK(cuLaunchKernel(function,
                                (unsigned int)(m + mb - 1) / mb, (unsigned int)(n + nb - 1) / nb, 1,
                                bx, by, 1,
                                0, stream, params, NULL));

  CU_ERROR_CHECK(cuCtxPopCurrent(&handle->context));

  return CUDA_SUCCESS;
}
/* cuZtrmm: single-GPU in-place ZTRMM, B := alpha*op(A)*B or alpha*B*op(A).
 *
 * Implemented by allocating a temporary pitched device matrix X, running
 * the out-of-place kernel (cuZtrmm2) into it, and copying the result back
 * into B on the same stream.
 */
CUresult cuZtrmm(CUBLAShandle handle,
                 CBlasSide side, CBlasUplo uplo, CBlasTranspose trans, CBlasDiag diag,
                 size_t m, size_t n,
                 double complex alpha, CUdeviceptr A, size_t lda, CUdeviceptr B, size_t ldb,
                 CUstream stream) {
  const size_t nRowA = (side == CBlasLeft) ? m : n;

  /* Argument checks, matching the host ztrmm. */
  int info = 0;
  if (lda < nRowA)
    info = 9;
  else if (ldb < m)
    info = 11;
  if (info != 0) {
    XERBLA(info);
    return CUDA_ERROR_INVALID_VALUE;
  }

  /* Quick return. */
  if (m == 0 || n == 0)
    return CUDA_SUCCESS;

  CU_ERROR_CHECK(cuCtxPushCurrent(handle->context));

  /* Temporary output buffer; cuMemAllocPitch returns the pitch in bytes,
   * converted here to elements for use as a leading dimension. */
  CUdeviceptr X;
  size_t ldx;
  CU_ERROR_CHECK(cuMemAllocPitch(&X, &ldx, m * sizeof(double complex), n, sizeof(double complex)));
  ldx /= sizeof(double complex);

  CU_ERROR_CHECK(cuZtrmm2(handle, side, uplo, trans, diag, m, n, alpha, A, lda, B, ldb, X, ldx, stream));

  /* Copy the result back into B (asynchronous device-to-device copy). */
  CU_ERROR_CHECK(cuMemcpyDtoD2DAsync(B, ldb, 0, 0, X, ldx, 0, 0, m, n, sizeof(double complex), stream));

  /* NOTE(review): X is freed right after the async copy is queued; this
   * relies on cuMemFree synchronizing against in-flight work touching the
   * allocation -- confirm this holds for the targeted driver versions. */
  CU_ERROR_CHECK(cuMemFree(X));

  CU_ERROR_CHECK(cuCtxPopCurrent(&handle->context));

  return CUDA_SUCCESS;
}
/* cuMultiGPUZtrmm: blocked, multi-GPU in-place ZTRMM on host matrices,
 * B := alpha*op(A)*B (left) or B := alpha*B*op(A) (right).
 *
 * The matrix is processed in panels of mb rows (left) or nb columns
 * (right).  For each panel, the rectangular (off-triangle) part of the
 * update is offloaded as a GEMM across the GPUs, and the triangular part
 * is applied on the host with ztrmm.  Each GEMM is synchronized before
 * the host ztrmm reads/writes the panel it touched.
 */
CUresult cuMultiGPUZtrmm(CUmultiGPUBLAShandle handle,
                         CBlasSide side, CBlasUplo uplo, CBlasTranspose trans, CBlasDiag diag,
                         size_t m, size_t n,
                         double complex alpha, const double complex * restrict A, size_t lda,
                         double complex * restrict B, size_t ldb) {
  const size_t nRowA = (side == CBlasLeft) ? m : n;

  /* Argument checks, matching the host ztrmm. */
  int info = 0;
  if (lda < nRowA)
    info = 9;
  else if (ldb < m)
    info = 11;
  if (info != 0) {
    XERBLA(info);
    return CUDA_ERROR_INVALID_VALUE;
  }

  /* Quick return. */
  if (m == 0 || n == 0)
    return CUDA_SUCCESS;

  /* alpha == 0: zero B via a degenerate GEMM (k = 0, beta = 0). */
  if (alpha == zero) {
    zgemm(CBlasNoTrans, CBlasNoTrans, m, n, 0, zero, A, lda, B, ldb, zero, B, ldb);
    return CUDA_SUCCESS;
  }

  /* Panel sizes taken from the tuned GEMM blocking parameters. */
  const size_t mb = (trans == CBlasNoTrans) ? ZGEMM_N_MB : ZGEMM_CN_MB;
  const size_t nb = ZGEMM_N_NB;

  /* Small problems are not worth offloading: run entirely on the host. */
  if (m <= mb || n <= nb) {
    ztrmm(side, uplo, trans, diag, m, n, alpha, A, lda, B, ldb);
    return CUDA_SUCCESS;
  }

  if (side == CBlasLeft) {
    if (trans == CBlasNoTrans) {
      if (uplo == CBlasUpper) {
        /* Walk the row panels top..bottom in reverse so each panel is
         * updated from rows below it before being overwritten.
         * NOTE(review): rounding m up with "& ~(mb - 1)" assumes mb is a
         * power of two -- confirm the ZGEMM_*_MB values in config.h. */
        size_t i = (m + mb - 1) & ~(mb - 1);
        do {
          i -= mb;
          const size_t ib = min(mb, m - i);
          /* B[i] := alpha*B[i] - (-1)*A[i, i+ib..m]*B[i+ib..m] on GPUs. */
          CU_ERROR_CHECK(cuMultiGPUZgemm(handle, CBlasNoTrans, CBlasNoTrans, ib, n, m - i - ib, -one, &A[(i + ib) * lda + i], lda, &B[i + ib], ldb, alpha, &B[i], ldb));
          CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
          /* Apply the triangular diagonal block on the host. */
          ztrmm(CBlasLeft, CBlasUpper, CBlasNoTrans, diag, ib, n, one, &A[i * lda + i], lda, &B[i], ldb);
        } while (i > 0);
      }
      else {
        /* Lower, no-transpose: forward sweep over row panels. */
        for (size_t i = 0; i < m; i += mb) {
          const size_t ib = min(mb, m - i);
          CU_ERROR_CHECK(cuMultiGPUZgemm(handle, CBlasNoTrans, CBlasNoTrans, ib, n, i, -one, &A[i], lda, B, ldb, alpha, &B[i], ldb));
          CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
          ztrmm(CBlasLeft, CBlasLower, CBlasNoTrans, diag, ib, n, one, &A[i * lda + i], lda, &B[i], ldb);
        }
      }
    }
    else {
      if (uplo == CBlasUpper) {
        /* Upper, (conjugate-)transpose: forward sweep; op(A) row panel i
         * multiplies the rows above it. */
        for (size_t i = 0; i < m; i += mb) {
          const size_t ib = min(mb, m - i);
          CU_ERROR_CHECK(cuMultiGPUZgemm(handle, trans, CBlasNoTrans, ib, n, i, -one, &A[i * lda], lda, B, ldb, alpha, &B[i], ldb));
          CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
          ztrmm(CBlasLeft, CBlasUpper, trans, diag, ib, n, one, &A[i * lda + i], lda, &B[i], ldb);
        }
      }
      else {
        /* Lower, (conjugate-)transpose: reverse sweep. */
        size_t i = (m + mb - 1) & ~(mb - 1);
        do {
          i -= mb;
          const size_t ib = min(mb, m - i);
          CU_ERROR_CHECK(cuMultiGPUZgemm(handle, trans, CBlasNoTrans, ib, n, m - i - ib, -one, &A[i * lda + i + ib], lda, &B[i + ib], ldb, alpha, &B[i], ldb));
          CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
          ztrmm(CBlasLeft, CBlasLower, trans, diag, ib, n, one, &A[i * lda + i], lda, &B[i], ldb);
        } while (i > 0);
      }
    }
  }
  else {
    if (trans == CBlasNoTrans) {
      if (uplo == CBlasUpper) {
        /* Right, upper, no-transpose: forward sweep over column panels. */
        for (size_t j = 0; j < n; j += nb) {
          const size_t jb = min(nb, n - j);
          CU_ERROR_CHECK(cuMultiGPUZgemm(handle, CBlasNoTrans, CBlasNoTrans, m, jb, j, -one, B, ldb, &A[j * lda], lda, alpha, &B[j * ldb], ldb));
          CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
          ztrmm(CBlasRight, CBlasUpper, CBlasNoTrans, diag, m, jb, one, &A[j * lda + j], lda, &B[j * ldb], ldb);
        }
      }
      else {
        /* Right, lower, no-transpose: reverse sweep over column panels. */
        size_t j = (n + nb - 1) & ~(nb - 1);
        do {
          j -= nb;
          const size_t jb = min(nb, n - j);
          CU_ERROR_CHECK(cuMultiGPUZgemm(handle, CBlasNoTrans, CBlasNoTrans, m, jb, n - j - jb, -one, &B[(j + jb) * ldb], ldb, &A[j * lda + j + jb], lda, alpha, &B[j * ldb], ldb));
          CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
          ztrmm(CBlasRight, CBlasLower, CBlasNoTrans, diag, m, jb, one, &A[j * lda + j], lda, &B[j * ldb], ldb);
        } while (j > 0);
      }
    }
    else {
      if (uplo == CBlasUpper) {
        /* Right, upper, (conjugate-)transpose: reverse sweep. */
        size_t j = (n + nb - 1) & ~(nb - 1);
        do {
          j -= nb;
          const size_t jb = min(nb, n - j);
          CU_ERROR_CHECK(cuMultiGPUZgemm(handle, CBlasNoTrans, trans, m, jb, n - j - jb, -one, &B[(j + jb) * ldb], ldb, &A[(j + jb) * lda + j], lda, alpha, &B[j * ldb], ldb));
          CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
          ztrmm(CBlasRight, CBlasUpper, trans, diag, m, jb, one, &A[j * lda + j], lda, &B[j * ldb], ldb);
        } while (j > 0);
      }
      else {
        /* Right, lower, (conjugate-)transpose: forward sweep. */
        for (size_t j = 0; j < n; j += nb) {
          const size_t jb = min(nb, n - j);
          CU_ERROR_CHECK(cuMultiGPUZgemm(handle, CBlasNoTrans, trans, m, jb, j, -one, B, ldb, &A[j], lda, alpha, &B[j * ldb], ldb));
          CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
          ztrmm(CBlasRight, CBlasLower, trans, diag, m, jb, one, &A[j * lda + j], lda, &B[j * ldb], ldb);
        }
      }
    }
  }

  return CUDA_SUCCESS;
}
|
mat_mul_p4a_4000.c | /*
* file for mat_mul.c
*/
#include "./mat_mul.h"
#include "./size.h"
/* Forward declaration (kept from the P4A generator output). */
void mat_mul(int *a, int *b, int *c);

/* Multiply two 4000x4000 row-major int matrices into c.
 *
 * Notes on this auto-generated (P4A) kernel:
 *  - The innermost t-loop repeats the identical accumulation 100 times, so
 *    each entry ends up as 100 * sum_k a[i][k]*b[j][k] -- presumably a
 *    deliberate work amplification for benchmarking; confirm against the
 *    generator settings before "fixing" it.
 *  - b is indexed as b[j*4000+k], i.e. accessed with its indices swapped;
 *    under row-major storage this computes c = 100 * a * b^T (or expects b
 *    to be stored transposed) -- TODO confirm intent.
 *  - OpenMP parallelises the outer i-loop; rows of c are independent, and
 *    j, k, t are thread-private.
 */
void mat_mul(int *a, int *b, int *c)
{
   int i, j, k, t;
#pragma omp parallel for private(j, t, k)
   for(i = 0; i <= 3999; i += 1)
      for(j = 0; j <= 3999; j += 1) {
         c[i*4000+j] = 0;
         for(k = 0; k <= 3999; k += 1)
            for(t = 0; t <= 99; t += 1)
               c[i*4000+j] += a[i*4000+k]*b[j*4000+k];
      }
   return;
}
|
GB_binop__isne_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__isne_int32
// A.*B function (eWiseMult): GB_AemultB__isne_int32
// A*D function (colscale): GB_AxD__isne_int32
// D*A function (rowscale): GB_DxB__isne_int32
// C+=B function (dense accum): GB_Cdense_accumB__isne_int32
// C+=b function (dense accum): GB_Cdense_accumb__isne_int32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isne_int32
// C=scalar+B GB_bind1st__isne_int32
// C=scalar+B' GB_bind1st_tran__isne_int32
// C=A+scalar GB_bind2nd__isne_int32
// C=A'+scalar GB_bind2nd_tran__isne_int32
// C type: int32_t
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x != y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISNE || GxB_NO_INT32 || GxB_NO_ISNE_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense.  The actual loop comes from the
// included template, specialized by the GB_* macros defined above
// (types int32_t, operator cij = (aij != bij)).
GrB_Info GB_Cdense_ewise3_noaccum__isne_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads          // # of OpenMP threads to use
)
{
    #if GB_DISABLE
    // this operator/type combination was compiled out (see GB_DISABLE above)
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into the dense matrix C.  The loop
// lives in the included template, specialized by the GB_* macros above.
GrB_Info GB_Cdense_accumB__isne_int32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    // per-task slicing of B: first/last vector and starting entry of each
    // task (presumably produced by GB_ek_slice -- see GB_ek_slice.h)
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into the dense matrix C.  The loop lives in
// the included template, specialized by the GB_* macros above.
GrB_Info GB_Cdense_accumb__isne_int32
(
    GrB_Matrix C,
    const GB_void *p_bwork,     // pointer to the scalar b (type int32_t)
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
    }
    // single exit point, matching GB_Cdense_accumB above; the original had a
    // second return inside the inner block that made this one unreachable.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB_AxD__isne_int32
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,    // *_is_pattern: values unused if true -- TODO confirm
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,  // per-task slicing (see GB_ek_slice.h)
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the template writes results straight into C's value array
    int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB_DxB__isne_int32
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,    // *_is_pattern: values unused if true -- TODO confirm
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the template writes results straight into C's value array
    int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B, applied with the ISNE operator
// (cij = (aij != bij)).  All task scheduling was done by the caller;
// the fused loops live in GB_add_template.c.
GrB_Info GB_AaddB__isne_int32
(
    GrB_Matrix C,
    const GrB_Matrix M,                 // optional mask (may be NULL)
    const bool Mask_struct,             // structural mask: values of M ignored -- TODO confirm
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,                // NOTE(review): semantics defined by the template
    const int64_t *GB_RESTRICT C_to_M,  // vector mappings from C into M, A, B
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,  // parallel schedule
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B, applied with the ISNE operator
// (cij = (aij != bij)).  The fused loops live in GB_emult_template.c.
GrB_Info GB_AemultB__isne_int32
(
    GrB_Matrix C,
    const GrB_Matrix M,                 // optional mask (may be NULL)
    const bool Mask_struct,             // structural mask: values of M ignored -- TODO confirm
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,  // vector mappings from C into M, A, B
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,  // parallel schedule
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x != Bx [p]) for 0 <= p < anz: apply the ISNE operator with the
// scalar bound as the first operand.
GrB_Info GB_bind1st__isne_int32
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped arguments
    const int32_t *Bx = (const int32_t *) Bx_input ;
    int32_t *Cx = (int32_t *) Cx_output ;
    const int32_t x = *((const int32_t *) x_input) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // cij = (x != bij)
        Cx [p] = (x != Bx [p]) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] != y) for 0 <= p < anz: apply the ISNE operator with the
// scalar bound as the second operand.
GrB_Info GB_bind2nd__isne_int32
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped arguments
    const int32_t *Ax = (const int32_t *) Ax_input ;
    int32_t *Cx = (int32_t *) Cx_output ;
    const int32_t y = *((const int32_t *) y_input) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // cij = (aij != y)
        Cx [p] = (Ax [p] != y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = (x != aij) ; \
}
// C = op (x, A'): transpose A and apply cij = (x != aij), using the
// GB_CAST_OP macro defined just above and the GB_unop_transpose template.
GrB_Info GB_bind1st_tran__isne_int32
(
    GrB_Matrix C,
    const GB_void *x_input,              // scalar x, bound as the 1st operand
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,  // per-task slicing of A
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code after this function
    #undef GB_ATYPE
    #define GB_ATYPE \
    int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = (aij != y) ; \
}
// C = op (A', y): transpose A and apply cij = (aij != y), using the
// GB_CAST_OP macro defined just above and the GB_unop_transpose template.
GrB_Info GB_bind2nd_tran__isne_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,              // scalar y, bound as the 2nd operand
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,  // per-task slicing of A
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
test_for_reduction.c | //===-- test_for_reduction.c - Test reductions at for construct ---*- C -*-===//
//
// Part of the LOMP project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
//
// This file has been modified from the file
// openmp/runtime/test/tasking/omp_for_reduction.c
// of the LLVM project (https://github.com/llvm/llvm-project)
// under the Apache License v2.0 with LLVM Exceptions.
//
//===----------------------------------------------------------------------===//
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "omp.h"
#include "tests.h"
#define DOUBLE_DIGITS 20 /* dt^DOUBLE_DIGITS */
#define MAX_FACTOR 10
#define KNOWN_PRODUCT 3628800 /* 10! */
/*
 * Exercises OpenMP "for" reductions with every supported operator
 * (+, -, *, &&, ||, &, |, ^) on int and double operands, using
 * schedule(dynamic, 1) so iterations are spread across threads.
 *
 * Returns 1 if every reduction produced the expected value, 0 otherwise
 * (errors are also reported on stderr).
 *
 * Fix: the original ended with "return ...; free(logics);".  The free was
 * unreachable dead code, and logics points at the stack array logicsArray,
 * so freeing it would have been undefined behavior anyway; it is removed.
 */
int test_omp_for_reduction(void) {
    double dt;
    int sum;
    int diff;
    int product = 1;
    double dsum;
    double dknown_sum;
    double ddiff;
    int logic_and;
    int logic_or;
    int bit_and;
    int bit_or;
    int exclusiv_bit_or;
    int * logics;
    int i;
    int known_sum;
    int known_product;
    double rounding_error = 1.E-9; /* over all rounding error to be
                                      ignored in the double tests */
    double dpt;
    int result = 0;
    int logicsArray[LOOPCOUNT];

    /* Variables for integer tests */
    sum = 0;
    product = 1;
    known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
    /* variables for double tests */
    dt = 1. / 3.; /* base of geometric row for + and - test*/
    dsum = 0.;
    /* Variables for logic tests */
    logics = logicsArray;  /* points at the stack array above -- never free'd */
    logic_and = 1;
    logic_or = 0;
    /* Variables for bit operators tests */
    bit_and = 1;
    bit_or = 0;
    /* Variables for exclusive bit or */
    exclusiv_bit_or = 0;

    /************************************************************************/
    /** Tests for integers                                                 **/
    /************************************************************************/

    /**** Testing integer addition ****/
    #pragma omp parallel
    {
        int j;
        #pragma omp for schedule(dynamic, 1) reduction(+ : sum)
        for (j = 1; j <= LOOPCOUNT; j++) {
            sum = sum + j;
        }
    }
    if (known_sum != sum) {
        result++;
        fprintf(stderr,
                "Error in sum with integers: Result was %d"
                " instead of %d.\n",
                sum, known_sum);
    }

    /**** Testing integer subtraction ****/
    diff = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
    #pragma omp parallel
    {
        int j;
        #pragma omp for schedule(dynamic, 1) reduction(- : diff)
        for (j = 1; j <= LOOPCOUNT; j++) {
            diff = diff - j;
        }
    }
    if (diff != 0) {
        result++;
        fprintf(stderr,
                "Error in difference with integers: Result was %d"
                " instead of 0.\n",
                diff);
    }

    /**** Testing integer multiplication ****/
    #pragma omp parallel
    {
        int j;
        #pragma omp for schedule(dynamic, 1) reduction(* : product)
        for (j = 1; j <= MAX_FACTOR; j++) {
            product *= j;
        }
    }
    known_product = KNOWN_PRODUCT;
    if (known_product != product) {
        result++;
        fprintf(stderr,
                "Error in Product with integers: Result was %d"
                " instead of %d\n",
                product, known_product);
    }

    /************************************************************************/
    /** Tests for doubles                                                  **/
    /************************************************************************/

    /**** Testing double addition ****/
    dsum = 0.;
    dpt = 1.;
    for (i = 0; i < DOUBLE_DIGITS; ++i) {
        dpt *= dt;
    }
    /* closed form of the geometric series sum_{j=0}^{DOUBLE_DIGITS-1} dt^j */
    dknown_sum = (1 - dpt) / (1 - dt);
    #pragma omp parallel
    {
        int j;
        #pragma omp for schedule(dynamic, 1) reduction(+ : dsum)
        for (j = 0; j < DOUBLE_DIGITS; j++) {
            dsum += pow(dt, j);
        }
    }
    if (fabs(dsum - dknown_sum) > rounding_error) {
        result++;
        fprintf(stderr,
                "\nError in sum with doubles: Result was %f"
                " instead of: %f (Difference: %E)\n",
                dsum, dknown_sum, dsum - dknown_sum);
    }

    /**** Testing double subtraction ****/
    ddiff = (1 - dpt) / (1 - dt);
    #pragma omp parallel
    {
        int j;
        #pragma omp for schedule(dynamic, 1) reduction(- : ddiff)
        for (j = 0; j < DOUBLE_DIGITS; ++j) {
            ddiff -= pow(dt, j);
        }
    }
    if (fabs(ddiff) > rounding_error) {
        result++;
        fprintf(stderr,
                "Error in Difference with doubles: Result was %E"
                " instead of 0.0\n",
                ddiff);
    }

    /************************************************************************/
    /** Tests for logical values                                           **/
    /************************************************************************/

    /**** Testing logic and ****/
    for (i = 0; i < LOOPCOUNT; i++) {
        logics[i] = 1;
    }
    #pragma omp parallel
    {
        int j;
        #pragma omp for schedule(dynamic, 1) reduction(&& : logic_and)
        for (j = 0; j < LOOPCOUNT; ++j) {
            logic_and = (logic_and && logics[j]);
        }
    }
    if (!logic_and) {
        result++;
        fprintf(stderr, "Error in logic AND part 1\n");
    }
    /* one zero in the middle must flip the result */
    logic_and = 1;
    logics[LOOPCOUNT / 2] = 0;
    #pragma omp parallel
    {
        int j;
        #pragma omp for schedule(dynamic, 1) reduction(&& : logic_and)
        for (j = 0; j < LOOPCOUNT; ++j) {
            logic_and = logic_and && logics[j];
        }
    }
    if (logic_and) {
        result++;
        fprintf(stderr, "Error in logic AND part 2\n");
    }

    /**** Testing logic or ****/
    for (i = 0; i < LOOPCOUNT; i++) {
        logics[i] = 0;
    }
    #pragma omp parallel
    {
        int j;
        #pragma omp for schedule(dynamic, 1) reduction(|| : logic_or)
        for (j = 0; j < LOOPCOUNT; ++j) {
            logic_or = logic_or || logics[j];
        }
    }
    if (logic_or) {
        result++;
        fprintf(stderr, "Error in logic OR part 1\n");
    }
    /* one nonzero in the middle must flip the result */
    logic_or = 0;
    logics[LOOPCOUNT / 2] = 1;
    #pragma omp parallel
    {
        int j;
        #pragma omp for schedule(dynamic, 1) reduction(|| : logic_or)
        for (j = 0; j < LOOPCOUNT; ++j) {
            logic_or = logic_or || logics[j];
        }
    }
    if (!logic_or) {
        result++;
        fprintf(stderr, "Error in logic OR part 2\n");
    }

    /************************************************************************/
    /** Tests for bit values                                               **/
    /************************************************************************/

    /**** Testing bit and ****/
    for (i = 0; i < LOOPCOUNT; ++i) {
        logics[i] = 1;
    }
    #pragma omp parallel
    {
        int j;
        #pragma omp for schedule(dynamic, 1) reduction(& : bit_and)
        for (j = 0; j < LOOPCOUNT; ++j) {
            bit_and = (bit_and & logics[j]);
        }
    }
    if (!bit_and) {
        result++;
        fprintf(stderr, "Error in BIT AND part 1\n");
    }
    bit_and = 1;
    logics[LOOPCOUNT / 2] = 0;
    #pragma omp parallel
    {
        int j;
        #pragma omp for schedule(dynamic, 1) reduction(& : bit_and)
        for (j = 0; j < LOOPCOUNT; ++j) {
            bit_and = bit_and & logics[j];
        }
    }
    if (bit_and) {
        result++;
        fprintf(stderr, "Error in BIT AND part 2\n");
    }

    /**** Testing bit or ****/
    for (i = 0; i < LOOPCOUNT; i++) {
        logics[i] = 0;
    }
    #pragma omp parallel
    {
        int j;
        #pragma omp for schedule(dynamic, 1) reduction(| : bit_or)
        for (j = 0; j < LOOPCOUNT; ++j) {
            bit_or = bit_or | logics[j];
        }
    }
    if (bit_or) {
        result++;
        fprintf(stderr, "Error in BIT OR part 1\n");
    }
    bit_or = 0;
    logics[LOOPCOUNT / 2] = 1;
    #pragma omp parallel
    {
        int j;
        #pragma omp for schedule(dynamic, 1) reduction(| : bit_or)
        for (j = 0; j < LOOPCOUNT; ++j) {
            bit_or = bit_or | logics[j];
        }
    }
    if (!bit_or) {
        result++;
        fprintf(stderr, "Error in BIT OR part 2\n");
    }

    /**** Testing exclusive bit or ****/
    for (i = 0; i < LOOPCOUNT; i++) {
        logics[i] = 0;
    }
    #pragma omp parallel
    {
        int j;
        #pragma omp for schedule(dynamic, 1) reduction(^ : exclusiv_bit_or)
        for (j = 0; j < LOOPCOUNT; ++j) {
            exclusiv_bit_or = exclusiv_bit_or ^ logics[j];
        }
    }
    if (exclusiv_bit_or) {
        result++;
        fprintf(stderr, "Error in EXCLUSIV BIT OR part 1\n");
    }
    exclusiv_bit_or = 0;
    logics[LOOPCOUNT / 2] = 1;
    #pragma omp parallel
    {
        int j;
        #pragma omp for schedule(dynamic, 1) reduction(^ : exclusiv_bit_or)
        for (j = 0; j < LOOPCOUNT; ++j) {
            exclusiv_bit_or = exclusiv_bit_or ^ logics[j];
        }
    }
    if (!exclusiv_bit_or) {
        result++;
        fprintf(stderr, "Error in EXCLUSIV BIT OR part 2\n");
    }

    return (result == 0);
}
/* Runs the reduction test suite REPETITIONS times; the exit status is the
 * number of repetitions that reported a failure (0 == all passed). */
int main(void) {
    int num_failed = 0;
    int rep;
    for (rep = 0; rep < REPETITIONS; rep++) {
        num_failed += test_omp_for_reduction() ? 0 : 1;
    }
    return num_failed;
}
|
detector.c | #include "darknet.h"
static int coco_ids[] = {1,2,3,4,5,6,7,8,9,10,11,13,14,15,16,17,18,19,20,21,22,23,24,25,27,28,31,32,33,34,35,36,37,38,39,40,41,42,43,44,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,67,70,72,73,74,75,76,77,78,79,80,81,82,84,85,86,87,88,89,90};
/**
 * \brief: training driver for an image-detection network.
 *
 * \param: datacfg     path of the data-description file (training list,
 *                     backup directory, ...)
 *         cfgfile     path of the network-architecture configuration file
 *         weightfile  path of the pre-trained weights file
 *         gpus        set of GPU ids (e.g. with one GPU it only contains 0,
 *                     and GPU 0 is used by default; with four GPUs it holds
 *                     0,1,2,3; NULL when no GPU is used)
 *         ngpus       number of GPUs used; both "one GPU" and "no GPU"
 *                     give ngpus == 1
 *         clear       reset the count of already-trained images (net->seen)
 *                     to zero; selected with "-clear" on the command line.
 *                     Set it when fine-tuning a model or training from
 *                     scratch; leave it unset when resuming from a
 *                     checkpoint.
 *
 * Note: weightfile holds the pre-trained parameters.
 */
void train_detector(char *datacfg, char *cfgfile,
    char *weightfile, int *gpus, int ngpus, int clear)
{
    // read the data config: paths of the training image list and of the
    // directory where weight snapshots are saved
    list *options = read_data_cfg(datacfg);
    char *train_images = option_find_str(options, "train", "data/train.list");
    char *backup_directory = option_find_str(options, "backup", "/backup/");
    // seed the RNG from the wall clock: a fixed srand() seed would make
    // rand() return the same sequence on every run, so time(0) is used to
    // obtain a different sequence each time the trainer starts
    srand(time(0));
    // file name without directory or extension, e.g. cfg/yolov3.cfg -> "yolov3"
    char *base = basecfg(cfgfile);
    printf("%s\n", base);
    float avg_loss = -1;
    // NOTE(review): sizeof(network) over-allocates here; sizeof(network *)
    // would suffice for an array of pointers (harmless, just wasteful)
    network **nets = calloc(ngpus, sizeof(network));
    srand(time(0));
    int seed = rand();
    int i;
    for(i = 0; i < ngpus; ++i){
        // same seed for every replica so they initialize identically
        srand(seed);
#ifdef GPU
        cuda_set_device(gpus[i]);
#endif
        nets[i] = load_network(cfgfile, weightfile, clear);
        nets[i]->learning_rate *= ngpus;
    }
    srand(time(0));
    network *net = nets[0]; // the ngpus replicas differ only in gpu_index
    // images consumed per optimizer step, across all GPUs
    int imgs = net->batch * net->subdivisions * ngpus;
    printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate,
        net->momentum, net->decay);
    data train, buffer;
    layer l = net->layers[net->n - 1]; // output (detection) layer
    int classes = l.classes; // number of object classes in the training data
    float jitter = l.jitter; // augmentation jitter factor
    // read every training sample path from the train list into an array
    list *plist = get_paths(train_images);
    //int N = plist->size;
    char **paths = (char **)list_to_array(plist);
    load_args args = get_base_args(net); // image-augmentation related args
    args.coords = l.coords;
    args.paths = paths;
    args.n = imgs;                 // samples loaded per round
    args.m = plist->size;          // total samples available
    args.classes = classes;
    args.jitter = jitter;
    args.num_boxes = l.max_boxes;
    args.d = &buffer;              // loader fills buffer; swapped into train below
    args.type = DETECTION_DATA;
    //args.type = INSTANCE_DATA;
    args.threads = 64;
    // data loading runs in the background while the network trains
    pthread_t load_thread = load_data(args);
    double time;
    int count = 0;
    //while(i*imgs < N*120){
    while(get_current_batch(net) < net->max_batches){
        // multi-scale training: every 10 rounds pick a new square input size
        if(l.random && count++%10 == 0){
            printf("Resizing\n");
            int dim = (rand() % 10 + 10) * 32;  // random multiple of 32 in [320, 608]
            // near the end of training, lock the size to the largest scale
            if (get_current_batch(net)+200 > net->max_batches) dim = 608;
            //int dim = (rand() % 4 + 16) * 32;
            printf("%d\n", dim);
            args.w = dim;
            args.h = dim;
            // the batch in flight was loaded at the old size: finish it,
            // discard it, and restart loading at the new size
            pthread_join(load_thread, 0);
            train = buffer;
            free_data(train);
            load_thread = load_data(args);
#pragma omp parallel for
            for(i = 0; i < ngpus; ++i){
                resize_network(nets[i], dim, dim);
            }
            net = nets[0];
        }
        time=what_time_is_it_now();
        // swap in the batch loaded in the background, start the next load
        pthread_join(load_thread, 0);
        train = buffer;
        load_thread = load_data(args);
        /*
        int k;
        for(k = 0; k < l.max_boxes; ++k){
            box b = float_to_box(train.y.vals[10] + 1 + k*5);
            if(!b.x) break;
            printf("loaded: %f %f %f %f\n", b.x, b.y, b.w, b.h);
        }
        */
        /*
        int zz;
        for(zz = 0; zz < train.X.cols; ++zz){
            image im = float_to_image(net->w, net->h, 3, train.X.vals[zz]);
            int k;
            for(k = 0; k < l.max_boxes; ++k){
                box b = float_to_box(train.y.vals[zz] + k*5, 1);
                printf("%f %f %f %f\n", b.x, b.y, b.w, b.h);
                draw_bbox(im, b, 1, 1,0,0);
            }
            show_image(im, "truth11");
            cvWaitKey(0);
            save_image(im, "truth11");
        }
        */
        printf("Loaded: %lf seconds\n", what_time_is_it_now()-time);
        time=what_time_is_it_now();
        float loss = 0;
#ifdef GPU
        if(ngpus == 1){
            loss = train_network(net, train);
        } else {
            loss = train_networks(nets, ngpus, train, 4);
        }
#else
        loss = train_network(net, train);
#endif
        if (avg_loss < 0) avg_loss = loss;
        avg_loss = avg_loss*.9 + loss*.1;  // exponential moving average of the loss
        i = get_current_batch(net);
        printf("%ld: %f, %f avg, %f rate, %lf seconds, %d images\n", get_current_batch(net), loss, avg_loss, get_current_rate(net), what_time_is_it_now()-time, i*imgs);
        // rolling checkpoint every 100 batches
        if(i%100==0){
#ifdef GPU
            if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
            char buff[256];
            sprintf(buff, "%s/%s.backup", backup_directory, base);
            save_weights(net, buff);
        }
        // numbered snapshot every 10000 batches (every 100 early in training)
        if(i%10000==0 || (i < 1000 && i%100 == 0)){
#ifdef GPU
            if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
            char buff[256];
            sprintf(buff, "%s/%s_%d.weights", backup_directory, base, i);
            save_weights(net, buff);
        }
        free_data(train);
    }
#ifdef GPU
    if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
    // final weights after max_batches is reached
    char buff[256];
    sprintf(buff, "%s/%s_final.weights", backup_directory, base);
    save_weights(net, buff);
}
/*
 * Extracts the numeric COCO image id from a file path such as
 * ".../COCO_val2014_000000000139.jpg": the digits after the last '_'
 * (or, failing that, after the last '/').
 *
 * Fix: if the name contained neither '_' nor '/', p stayed NULL and
 * atoi(p+1) dereferenced invalid memory; now the whole string is parsed
 * as a fallback.
 */
static int get_coco_image_id(char *filename)
{
    char *p = strrchr(filename, '/');
    char *c = strrchr(filename, '_');
    if (c) p = c;
    if (!p) return atoi(filename);
    return atoi(p + 1);
}
/*
 * Appends one COCO-format JSON record per (box, class) pair with a nonzero
 * score to fp.  Boxes are converted from centre/size form to a clipped
 * [x, y, width, height] and class indices are mapped through coco_ids.
 */
static void print_cocos(FILE *fp, char *image_path, detection *dets, int num_boxes, int classes, int w, int h)
{
    int image_id = get_coco_image_id(image_path);
    int i;
    for(i = 0; i < num_boxes; ++i){
        box b = dets[i].bbox;
        // centre/size -> corner coordinates
        float xmin = b.x - b.w/2.;
        float xmax = b.x + b.w/2.;
        float ymin = b.y - b.h/2.;
        float ymax = b.y + b.h/2.;
        // clip to the image bounds
        if (xmin < 0) xmin = 0;
        if (ymin < 0) ymin = 0;
        if (xmax > w) xmax = w;
        if (ymax > h) ymax = h;
        // COCO wants top-left corner plus width/height
        float bx = xmin;
        float by = ymin;
        float bw = xmax - xmin;
        float bh = ymax - ymin;
        int j;
        for(j = 0; j < classes; ++j){
            if (dets[i].prob[j]) fprintf(fp, "{\"image_id\":%d, \"category_id\":%d, \"bbox\":[%f, %f, %f, %f], \"score\":%f},\n", image_id, coco_ids[j], bx, by, bw, bh, dets[i].prob[j]);
        }
    }
}
/*
 * Writes VOC-style detection records: for every box/class pair with a
 * nonzero score, appends "<image id> <score> <xmin> <ymin> <xmax> <ymax>"
 * to the per-class file fps[class].  Coordinates are converted from the
 * centre/size representation to 1-based corners and clipped to the image.
 */
void print_detector_detections(FILE **fps, char *id, detection *dets, int total, int classes, int w, int h)
{
    int i;
    for(i = 0; i < total; ++i){
        box b = dets[i].bbox;
        // centre/size -> 1-based corner coordinates
        float left   = b.x - b.w/2. + 1;
        float right  = b.x + b.w/2. + 1;
        float top    = b.y - b.h/2. + 1;
        float bottom = b.y + b.h/2. + 1;
        // clip to the image
        if (left < 1) left = 1;
        if (top < 1) top = 1;
        if (right > w) right = w;
        if (bottom > h) bottom = h;
        int j;
        for(j = 0; j < classes; ++j){
            float score = dets[i].prob[j];
            if (score) fprintf(fps[j], "%s %f %f %f %f %f\n", id, score,
                    left, top, right, bottom);
        }
    }
}
/*
 * Writes ImageNet-style records "<image id> <1-based class> <score>
 * <xmin> <ymin> <xmax> <ymax>" for every box/class pair with a nonzero
 * score.  Boxes are converted from centre/size form and clipped to the image.
 */
void print_imagenet_detections(FILE *fp, int id, detection *dets, int total, int classes, int w, int h)
{
    int i;
    for(i = 0; i < total; ++i){
        box b = dets[i].bbox;
        float xmin = b.x - b.w/2.;
        float xmax = b.x + b.w/2.;
        float ymin = b.y - b.h/2.;
        float ymax = b.y + b.h/2.;
        // clip to the image bounds
        if (xmin < 0) xmin = 0;
        if (ymin < 0) ymin = 0;
        if (xmax > w) xmax = w;
        if (ymax > h) ymax = h;
        int j;
        for(j = 0; j < classes; ++j){
            // (the redundant "class" alias of j in the original is dropped;
            // it also happens to be a C++ keyword)
            float score = dets[i].prob[j];
            if (score) fprintf(fp, "%d %d %f %f %f %f %f\n", id, j+1, score,
                    xmin, ymin, xmax, ymax);
        }
    }
}
/*
 * Validation with horizontal-flip test-time augmentation: each image is fed
 * to the network twice (original + mirrored) in a single batch of 2, and the
 * resulting detections are written in the format selected by the data-config
 * "eval" option: COCO JSON, ImageNet txt, or one VOC-style file per class.
 */
void validate_detector_flip(char *datacfg, char *cfgfile, char *weightfile, char *outfile)
{
    int j;
    list *options = read_data_cfg(datacfg);
    char *valid_images = option_find_str(options, "valid", "data/train.list");
    char *name_list = option_find_str(options, "names", "data/names.list");
    char *prefix = option_find_str(options, "results", "results");
    char **names = get_labels(name_list);
    char *mapf = option_find_str(options, "map", 0);
    int *map = 0;
    if (mapf) map = read_map(mapf);
    network *net = load_network(cfgfile, weightfile, 0);
    // batch of 2: slot 0 holds the image, slot 1 its horizontal mirror
    set_batch_network(net, 2);
    fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
    srand(time(0));
    list *plist = get_paths(valid_images);
    char **paths = (char **)list_to_array(plist);
    layer l = net->layers[net->n-1];
    int classes = l.classes;
    char buff[1024];
    char *type = option_find_str(options, "eval", "voc");
    FILE *fp = 0;
    FILE **fps = 0;
    int coco = 0;
    int imagenet = 0;
    if(0==strcmp(type, "coco")){
        if(!outfile) outfile = "coco_results";
        snprintf(buff, 1024, "%s/%s.json", prefix, outfile);
        fp = fopen(buff, "w");
        fprintf(fp, "[\n");
        coco = 1;
    } else if(0==strcmp(type, "imagenet")){
        if(!outfile) outfile = "imagenet-detection";
        snprintf(buff, 1024, "%s/%s.txt", prefix, outfile);
        fp = fopen(buff, "w");
        imagenet = 1;
        classes = 200;   // ImageNet detection class count
    } else {
        // VOC-style: one result file per class name
        if(!outfile) outfile = "comp4_det_test_";
        fps = calloc(classes, sizeof(FILE *));
        for(j = 0; j < classes; ++j){
            snprintf(buff, 1024, "%s/%s%s.txt", prefix, outfile, names[j]);
            fps[j] = fopen(buff, "w");
        }
    }
    int m = plist->size;
    int i=0;
    int t;
    float thresh = .005;  // keep nearly everything; scoring tools threshold later
    float nms = .45;
    int nthreads = 4;     // images loaded ahead by this many worker threads
    image *val = calloc(nthreads, sizeof(image));
    image *val_resized = calloc(nthreads, sizeof(image));
    image *buf = calloc(nthreads, sizeof(image));
    image *buf_resized = calloc(nthreads, sizeof(image));
    pthread_t *thr = calloc(nthreads, sizeof(pthread_t));
    // input holds both copies: channel count doubled so original + flip
    // share one forward pass
    image input = make_image(net->w, net->h, net->c*2);
    load_args args = {0};
    args.w = net->w;
    args.h = net->h;
    //args.type = IMAGE_DATA;
    args.type = LETTERBOX_DATA;
    // prime the loader pipeline with the first nthreads images
    for(t = 0; t < nthreads; ++t){
        args.path = paths[i+t];
        args.im = &buf[t];
        args.resized = &buf_resized[t];
        thr[t] = load_data_in_thread(args);
    }
    double start = what_time_is_it_now();
    for(i = nthreads; i < m+nthreads; i += nthreads){
        fprintf(stderr, "%d\n", i);
        // collect the images loaded in the background
        for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
            pthread_join(thr[t], 0);
            val[t] = buf[t];
            val_resized[t] = buf_resized[t];
        }
        // start loading the next group
        for(t = 0; t < nthreads && i+t < m; ++t){
            args.path = paths[i+t];
            args.im = &buf[t];
            args.resized = &buf_resized[t];
            thr[t] = load_data_in_thread(args);
        }
        for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
            char *path = paths[i+t-nthreads];
            char *id = basecfg(path);
            // first half of input: original image; second half: flipped copy
            copy_cpu(net->w*net->h*net->c, val_resized[t].data, 1, input.data, 1);
            flip_image(val_resized[t]);
            copy_cpu(net->w*net->h*net->c, val_resized[t].data, 1, input.data + net->w*net->h*net->c, 1);
            network_predict(net, input.data);
            int w = val[t].w;
            int h = val[t].h;
            int num = 0;
            detection *dets = get_network_boxes(net, w, h, thresh, .5, map, 0, &num);
            if (nms) do_nms_sort(dets, num, classes, nms);
            if (coco){
                print_cocos(fp, path, dets, num, classes, w, h);
            } else if (imagenet){
                print_imagenet_detections(fp, i+t-nthreads+1, dets, num, classes, w, h);
            } else {
                print_detector_detections(fps, id, dets, num, classes, w, h);
            }
            free_detections(dets, num);
            free(id);
            free_image(val[t]);
            free_image(val_resized[t]);
        }
    }
    for(j = 0; j < classes; ++j){
        if(fps) fclose(fps[j]);
    }
    if(coco){
        // back up over the trailing ",\n" to terminate the JSON array cleanly
        fseek(fp, -2, SEEK_CUR);
        fprintf(fp, "\n]\n");
        fclose(fp);
    }
    fprintf(stderr, "Total Detection Time: %f Seconds\n", what_time_is_it_now() - start);
}
/*
 * Runs the network over the validation list from datacfg and writes raw
 * detections in the format selected by the data-config "eval" option:
 * COCO JSON, ImageNet txt, or one VOC-style file per class (the default).
 */
void validate_detector(char *datacfg, char *cfgfile, char *weightfile, char *outfile)
{
    int j;
    list *options = read_data_cfg(datacfg);
    char *valid_images = option_find_str(options, "valid", "data/train.list");
    char *name_list = option_find_str(options, "names", "data/names.list");
    char *prefix = option_find_str(options, "results", "results");
    char **names = get_labels(name_list);
    char *mapf = option_find_str(options, "map", 0);
    int *map = 0;
    if (mapf) map = read_map(mapf);
    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 1);
    fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
    srand(time(0));
    list *plist = get_paths(valid_images);
    char **paths = (char **)list_to_array(plist);
    layer l = net->layers[net->n-1];
    int classes = l.classes;
    char buff[1024];
    char *type = option_find_str(options, "eval", "voc");
    FILE *fp = 0;
    FILE **fps = 0;
    int coco = 0;
    int imagenet = 0;
    if(0==strcmp(type, "coco")){
        if(!outfile) outfile = "coco_results";
        snprintf(buff, 1024, "%s/%s.json", prefix, outfile);
        fp = fopen(buff, "w");
        fprintf(fp, "[\n");
        coco = 1;
    } else if(0==strcmp(type, "imagenet")){
        if(!outfile) outfile = "imagenet-detection";
        snprintf(buff, 1024, "%s/%s.txt", prefix, outfile);
        fp = fopen(buff, "w");
        imagenet = 1;
        classes = 200;   // ImageNet detection class count
    } else {
        // VOC-style: one result file per class name
        if(!outfile) outfile = "comp4_det_test_";
        fps = calloc(classes, sizeof(FILE *));
        for(j = 0; j < classes; ++j){
            snprintf(buff, 1024, "%s/%s%s.txt", prefix, outfile, names[j]);
            fps[j] = fopen(buff, "w");
        }
    }
    int m = plist->size;
    int i=0;
    int t;
    float thresh = .005;  // keep nearly everything; scoring tools threshold later
    float nms = .45;
    int nthreads = 4;     // images loaded ahead by this many worker threads
    image *val = calloc(nthreads, sizeof(image));
    image *val_resized = calloc(nthreads, sizeof(image));
    image *buf = calloc(nthreads, sizeof(image));
    image *buf_resized = calloc(nthreads, sizeof(image));
    pthread_t *thr = calloc(nthreads, sizeof(pthread_t));
    load_args args = {0};
    args.w = net->w;
    args.h = net->h;
    //args.type = IMAGE_DATA;
    args.type = LETTERBOX_DATA;
    // prime the loader pipeline with the first nthreads images
    for(t = 0; t < nthreads; ++t){
        args.path = paths[i+t];
        args.im = &buf[t];
        args.resized = &buf_resized[t];
        thr[t] = load_data_in_thread(args);
    }
    double start = what_time_is_it_now();
    for(i = nthreads; i < m+nthreads; i += nthreads){
        fprintf(stderr, "%d\n", i);
        // collect the images loaded in the background
        for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
            pthread_join(thr[t], 0);
            val[t] = buf[t];
            val_resized[t] = buf_resized[t];
        }
        // start loading the next group
        for(t = 0; t < nthreads && i+t < m; ++t){
            args.path = paths[i+t];
            args.im = &buf[t];
            args.resized = &buf_resized[t];
            thr[t] = load_data_in_thread(args);
        }
        for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
            char *path = paths[i+t-nthreads];
            char *id = basecfg(path);
            float *X = val_resized[t].data;
            network_predict(net, X);
            int w = val[t].w;
            int h = val[t].h;
            int nboxes = 0;
            detection *dets = get_network_boxes(net, w, h, thresh, .5, map, 0, &nboxes);
            if (nms) do_nms_sort(dets, nboxes, classes, nms);
            if (coco){
                print_cocos(fp, path, dets, nboxes, classes, w, h);
            } else if (imagenet){
                print_imagenet_detections(fp, i+t-nthreads+1, dets, nboxes, classes, w, h);
            } else {
                print_detector_detections(fps, id, dets, nboxes, classes, w, h);
            }
            free_detections(dets, nboxes);
            free(id);
            free_image(val[t]);
            free_image(val_resized[t]);
        }
    }
    for(j = 0; j < classes; ++j){
        if(fps) fclose(fps[j]);
    }
    if(coco){
        // back up over the trailing ",\n" to terminate the JSON array cleanly
        fseek(fp, -2, SEEK_CUR);
        fprintf(fp, "\n]\n");
        fclose(fp);
    }
    fprintf(stderr, "Total Detection Time: %f Seconds\n", what_time_is_it_now() - start);
}
/*
 * Computes proposal count, average IOU, and recall of the network on the
 * hard-coded COCO 5k validation list, printing running statistics per image.
 *
 * Fixes over the original:
 *  - the best-IOU scan indexed dets[] up to l.w*l.h*l.n, which can exceed
 *    the number of boxes actually returned in nboxes (out-of-bounds read
 *    for multi-scale detectors); it now iterates over nboxes.
 *  - dets and truth were leaked on every image; they are now freed.
 */
void validate_detector_recall(char *cfgfile, char *weightfile)
{
    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 1);
    fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
    srand(time(0));
    list *plist = get_paths("data/coco_val_5k.list");
    char **paths = (char **)list_to_array(plist);
    int j, k;
    int m = plist->size;
    int i=0;
    float thresh = .001;
    float iou_thresh = .5;
    float nms = .4;
    int total = 0;      // ground-truth boxes seen so far
    int correct = 0;    // ground-truth boxes matched with IOU > iou_thresh
    int proposals = 0;  // detections above thresh
    float avg_iou = 0;
    for(i = 0; i < m; ++i){
        char *path = paths[i];
        image orig = load_image_color(path, 0, 0);
        image sized = resize_image(orig, net->w, net->h);
        char *id = basecfg(path);
        network_predict(net, sized.data);
        int nboxes = 0;
        detection *dets = get_network_boxes(net, sized.w, sized.h, thresh, .5, 0, 1, &nboxes);
        if (nms) do_nms_obj(dets, nboxes, 1, nms);
        // derive the ground-truth label-file path from the image path
        char labelpath[4096];
        find_replace(path, "images", "labels", labelpath);
        find_replace(labelpath, "JPEGImages", "labels", labelpath);
        find_replace(labelpath, ".jpg", ".txt", labelpath);
        find_replace(labelpath, ".JPEG", ".txt", labelpath);
        int num_labels = 0;
        box_label *truth = read_boxes(labelpath, &num_labels);
        for(k = 0; k < nboxes; ++k){
            if(dets[k].objectness > thresh){
                ++proposals;
            }
        }
        for (j = 0; j < num_labels; ++j) {
            ++total;
            box t = {truth[j].x, truth[j].y, truth[j].w, truth[j].h};
            float best_iou = 0;
            // was: k < l.w*l.h*l.n, which over-runs dets[] when the network
            // returns fewer boxes; nboxes is the real length of dets
            for(k = 0; k < nboxes; ++k){
                float iou = box_iou(dets[k].bbox, t);
                if(dets[k].objectness > thresh && iou > best_iou){
                    best_iou = iou;
                }
            }
            avg_iou += best_iou;
            if(best_iou > iou_thresh){
                ++correct;
            }
        }
        fprintf(stderr, "%5d %5d %5d\tRPs/Img: %.2f\tIOU: %.2f%%\tRecall:%.2f%%\n", i, correct, total, (float)proposals/(i+1), avg_iou*100/total, 100.*correct/total);
        free_detections(dets, nboxes);   // was leaked
        free(truth);                     // was leaked
        free(id);
        free_image(orig);
        free_image(sized);
    }
}
/*
 * Single-image detection demo: loads the network once, then for each image
 * path (given on the command line, or read interactively from stdin) runs a
 * forward pass, applies NMS, draws the detections, and saves/shows the
 * result image.
 */
void test_detector(char *datacfg, char *cfgfile, char *weightfile, char *filename, float thresh, float hier_thresh, char *outfile, int fullscreen)
{
    list *options = read_data_cfg(datacfg);
    char *name_list = option_find_str(options, "names", "data/names.list");
    char **names = get_labels(name_list);
    image **alphabet = load_alphabet();   // glyph images used to draw labels
    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 1);
    srand(2222222);
    double time;
    char buff[256];
    char *input = buff;
    float nms=.45;
    while(1){
        if(filename){
            strncpy(input, filename, 256);
        } else {
            // no file given: prompt for paths until EOF
            printf("Enter Image Path: ");
            fflush(stdout);
            input = fgets(input, 256, stdin);
            if(!input) return;
            strtok(input, "\n");   // strip the trailing newline
        }
        image im = load_image_color(input,0,0);
        // letterbox: resize keeping aspect ratio, pad to the network size
        image sized = letterbox_image(im, net->w, net->h);
        //image sized = resize_image(im, net->w, net->h);
        //image sized2 = resize_max(im, net->w);
        //image sized = crop_image(sized2, -((net->w - sized2.w)/2), -((net->h - sized2.h)/2), net->w, net->h);
        //resize_network(net, sized.w, sized.h);
        layer l = net->layers[net->n-1];
        float *X = sized.data;
        time=what_time_is_it_now();
        network_predict(net, X);
        printf("%s: Predicted in %f seconds.\n", input, what_time_is_it_now()-time);
        int nboxes = 0;
        detection *dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 1, &nboxes);
        //printf("%d\n", nboxes);
        //if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
        if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
        draw_detections(im, dets, nboxes, thresh, names, alphabet, l.classes);
        free_detections(dets, nboxes);
        if(outfile){
            save_image(im, outfile);
        }
        else{
            save_image(im, "predictions");
#ifdef OPENCV
            cvNamedWindow("predictions", CV_WINDOW_NORMAL);
            if(fullscreen){
                cvSetWindowProperty("predictions", CV_WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN);
            }
            show_image(im, "predictions");
            cvWaitKey(0);
            cvDestroyAllWindows();
#endif
        }
        free_image(im);
        free_image(sized);
        if (filename) break;   // single-shot mode when a filename was given
    }
}
/*
void censor_detector(char *datacfg, char *cfgfile, char *weightfile, int cam_index, const char *filename, int class, float thresh, int skip)
{
#ifdef OPENCV
char *base = basecfg(cfgfile);
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
CvCapture * cap;
int w = 1280;
int h = 720;
if(filename){
cap = cvCaptureFromFile(filename);
}else{
cap = cvCaptureFromCAM(cam_index);
}
if(w){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w);
}
if(h){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h);
}
if(!cap) error("Couldn't connect to webcam.\n");
cvNamedWindow(base, CV_WINDOW_NORMAL);
cvResizeWindow(base, 512, 512);
float fps = 0;
int i;
float nms = .45;
while(1){
image in = get_image_from_stream(cap);
//image in_s = resize_image(in, net->w, net->h);
image in_s = letterbox_image(in, net->w, net->h);
layer l = net->layers[net->n-1];
float *X = in_s.data;
network_predict(net, X);
int nboxes = 0;
detection *dets = get_network_boxes(net, in.w, in.h, thresh, 0, 0, 0, &nboxes);
//if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
for(i = 0; i < nboxes; ++i){
if(dets[i].prob[class] > thresh){
box b = dets[i].bbox;
int left = b.x-b.w/2.;
int top = b.y-b.h/2.;
censor_image(in, left, top, b.w, b.h);
}
}
show_image(in, base);
cvWaitKey(10);
free_detections(dets, nboxes);
free_image(in_s);
free_image(in);
float curr = 0;
fps = .9*fps + .1*curr;
for(i = 0; i < skip; ++i){
image in = get_image_from_stream(cap);
free_image(in);
}
}
#endif
}
void extract_detector(char *datacfg, char *cfgfile, char *weightfile, int cam_index, const char *filename, int class, float thresh, int skip)
{
#ifdef OPENCV
char *base = basecfg(cfgfile);
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
CvCapture * cap;
int w = 1280;
int h = 720;
if(filename){
cap = cvCaptureFromFile(filename);
}else{
cap = cvCaptureFromCAM(cam_index);
}
if(w){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w);
}
if(h){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h);
}
if(!cap) error("Couldn't connect to webcam.\n");
cvNamedWindow(base, CV_WINDOW_NORMAL);
cvResizeWindow(base, 512, 512);
float fps = 0;
int i;
int count = 0;
float nms = .45;
while(1){
image in = get_image_from_stream(cap);
//image in_s = resize_image(in, net->w, net->h);
image in_s = letterbox_image(in, net->w, net->h);
layer l = net->layers[net->n-1];
show_image(in, base);
int nboxes = 0;
float *X = in_s.data;
network_predict(net, X);
detection *dets = get_network_boxes(net, in.w, in.h, thresh, 0, 0, 1, &nboxes);
//if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
for(i = 0; i < nboxes; ++i){
if(dets[i].prob[class] > thresh){
box b = dets[i].bbox;
int size = b.w*in.w > b.h*in.h ? b.w*in.w : b.h*in.h;
int dx = b.x*in.w-size/2.;
int dy = b.y*in.h-size/2.;
image bim = crop_image(in, dx, dy, size, size);
char buff[2048];
sprintf(buff, "results/extract/%07d", count);
++count;
save_image(bim, buff);
free_image(bim);
}
}
free_detections(dets, nboxes);
free_image(in_s);
free_image(in);
float curr = 0;
fps = .9*fps + .1*curr;
for(i = 0; i < skip; ++i){
image in = get_image_from_stream(cap);
free_image(in);
}
}
#endif
}
*/
/*
void network_detect(network *net, image im, float thresh, float hier_thresh, float nms, detection *dets)
{
network_predict_image(net, im);
layer l = net->layers[net->n-1];
int nboxes = num_boxes(net);
fill_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 0, dets);
if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
}
*/
/*
** Command-line dispatcher for the detector subcommands
** (test / train / valid / valid2 / recall / demo).
**
** Expected argv layout: prog detector <mode> <datacfg> <cfg> [weights] [filename].
** Optional flags are consumed by the find_*_arg helpers before dispatch.
*/
void run_detector(int argc, char **argv)
{
    char *prefix = find_char_arg(argc, argv, "-prefix", 0);
    float thresh = find_float_arg(argc, argv, "-thresh", .5);
    float hier_thresh = find_float_arg(argc, argv, "-hier", .5);
    int cam_index = find_int_arg(argc, argv, "-c", 0);
    int frame_skip = find_int_arg(argc, argv, "-s", 0);
    int avg = find_int_arg(argc, argv, "-avg", 3);
    if(argc < 4){
        fprintf(stderr, "usage: %s %s [train/test/valid] [cfg] [weights (optional)]\n", argv[0], argv[1]);
        return;
    }
    char *gpu_list = find_char_arg(argc, argv, "-gpus", 0);
    char *outfile = find_char_arg(argc, argv, "-out", 0);
    int *gpus = 0;
    int gpu = 0;
    int ngpus = 0;
    int gpus_heap = 0;   // set when gpus was allocated with calloc
    if(gpu_list){
        printf("%s\n", gpu_list);
        int len = strlen(gpu_list);
        ngpus = 1;
        int i;
        // One GPU id per comma-separated token.
        for(i = 0; i < len; ++i){
            if (gpu_list[i] == ',') ++ngpus;
        }
        gpus = calloc(ngpus, sizeof(int));
        if(!gpus){
            fprintf(stderr, "run_detector: calloc failed\n");
            return;
        }
        gpus_heap = 1;
        for(i = 0; i < ngpus; ++i){
            gpus[i] = atoi(gpu_list);
            // Bug fix: the old code did strchr(...)+1 unconditionally,
            // computing NULL+1 (UB) after the last token.
            char *comma = strchr(gpu_list, ',');
            if(!comma) break;
            gpu_list = comma + 1;
        }
    } else {
        gpu = gpu_index;
        gpus = &gpu;
        ngpus = 1;
    }

    int clear = find_arg(argc, argv, "-clear");
    int fullscreen = find_arg(argc, argv, "-fullscreen");
    int width = find_int_arg(argc, argv, "-w", 0);
    int height = find_int_arg(argc, argv, "-h", 0);
    int fps = find_int_arg(argc, argv, "-fps", 0);
    //int class = find_int_arg(argc, argv, "-class", 0);

    // NOTE(review): argc == 4 passes the check above yet leaves cfg ==
    // argv[4] == NULL (argv[argc] is NULL per C11) -- confirm intended.
    char *datacfg = argv[3];
    char *cfg = argv[4];
    char *weights = (argc > 5) ? argv[5] : 0;
    char *filename = (argc > 6) ? argv[6]: 0;
    if(0==strcmp(argv[2], "test")) test_detector(datacfg, cfg, weights, filename, thresh, hier_thresh, outfile, fullscreen);
    else if(0==strcmp(argv[2], "train")) train_detector(datacfg, cfg, weights, gpus, ngpus, clear);
    else if(0==strcmp(argv[2], "valid")) validate_detector(datacfg, cfg, weights, outfile);
    else if(0==strcmp(argv[2], "valid2")) validate_detector_flip(datacfg, cfg, weights, outfile);
    else if(0==strcmp(argv[2], "recall")) validate_detector_recall(cfg, weights);
    else if(0==strcmp(argv[2], "demo")) {
        list *options = read_data_cfg(datacfg);
        int classes = option_find_int(options, "classes", 20);
        char *name_list = option_find_str(options, "names", "data/names.list");
        char **names = get_labels(name_list);
        demo(cfg, weights, thresh, cam_index, filename, names, classes, frame_skip, prefix, avg, hier_thresh, width, height, fps, fullscreen);
    }
    //else if(0==strcmp(argv[2], "extract")) extract_detector(datacfg, cfg, weights, cam_index, filename, class, thresh, frame_skip);
    //else if(0==strcmp(argv[2], "censor")) censor_detector(datacfg, cfg, weights, cam_index, filename, class, thresh, frame_skip);

    // Bug fix: the comma-separated GPU list leaked its allocation.
    if(gpus_heap) free(gpus);
}
|
mg.c | //-------------------------------------------------------------------------//
// //
// This benchmark is an OpenMP C version of the NPB MG code. This OpenMP //
// C version is developed by the Center for Manycore Programming at Seoul //
// National University and derived from the OpenMP Fortran versions in //
// "NPB3.3-OMP" developed by NAS. //
// //
// Permission to use, copy, distribute and modify this software for any //
// purpose with or without fee is hereby granted. This software is //
// provided "as is" without express or implied warranty. //
// //
// Information on NPB 3.3, including the technical report, the original //
// specifications, source code, results and information on how to submit //
// new results, is available at: //
// //
// http://www.nas.nasa.gov/Software/NPB/ //
// //
// Send comments or suggestions for this OpenMP C version to //
// cmp@aces.snu.ac.kr //
// //
// Center for Manycore Programming //
// School of Computer Science and Engineering //
// Seoul National University //
// Seoul 151-744, Korea //
// //
// E-mail: cmp@aces.snu.ac.kr //
// //
//-------------------------------------------------------------------------//
//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, //
// and Jaejin Lee //
//-------------------------------------------------------------------------//
//---------------------------------------------------------------------
// program mg
//---------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "globals.h"
#include "randdp.h"
#include "timers.h"
#include "print_results.h"
#include "../my_include/my_include.h"
static void setup(int *n1, int *n2, int *n3);
static void mg3P(double u[], double v[], double r[],
double a[4], double c[4], int n1, int n2, int n3);
static void psinv(void *or, void *ou, int n1, int n2, int n3,
double c[4], int k);
static void resid(void *ou, void *ov, void *or, int n1, int n2, int n3,
double a[4], int k);
static void rprj3(void *or, int m1k, int m2k, int m3k,
void *os, int m1j, int m2j, int m3j, int k);
static void interp(void *oz, int mm1, int mm2, int mm3,
void *ou, int n1, int n2, int n3, int k);
static void norm2u3(void *or, int n1, int n2, int n3,
double *rnm2, double *rnmu,
int nx, int ny, int nz);
static void rep_nrm(void *u, int n1, int n2, int n3, char *title, int kk);
static void comm3(void *ou, int n1, int n2, int n3, int kk);
static void zran3(void *oz, int n1, int n2, int n3, int nx1, int ny1, int k);
static void showall(void *oz, int n1, int n2, int n3);
static double power(double a, int n);
static void bubble(double ten[][2], int j1[][2], int j2[][2], int j3[][2],
int m, int ind);
static void zero3(void *oz, int n1, int n2, int n3);
//-------------------------------------------------------------------------c
// These arrays are in common because they are quite large
// and probably shouldn't be allocated on the stack. They
// are always passed as subroutine args.
//-------------------------------------------------------------------------c
/* commcon /noautom/ */
static double u[NR];
static double v[NR];
static double r[NR];
/* common /grid/ */
static int is1, is2, is3, ie1, ie2, ie3;
/* common /rans_save/ starts */
double starts[NM];
int main()
{
  //-------------------------------------------------------------------------c
  // k is the current level. It is passed down through subroutine args
  // and is NOT global. it is the current iteration
  //-------------------------------------------------------------------------c
  int k, it;

  // Register state with the crash-consistency runtime from my_include.h:
  // the big grids u/r and the iteration counter it.
  // NOTE(review): exact semantics of crucial_data/consistent_data/clwb live
  // in ../my_include/my_include.h -- confirm there.
  crucial_data(&u[0], "double", NR);
  crucial_data(&r[0], "double", NR);
  consistent_data(&it, "int", 1);

  double t, tinit, mflops;

  double a[4], c[4];

  double rnm2, rnmu, old2, oldu, epsilon;
  int n1, n2, n3, nit;
  double nn, verify_value, err;
  logical verified;

  int i;
  char *t_names[T_last];
  double tmax;

  for (i = T_init; i < T_last; i++) {
    timer_clear(i);
  }
  timer_start(T_init);

  //---------------------------------------------------------------------
  // Read in and broadcast input data
  //---------------------------------------------------------------------
  // The mere presence of a "timer.flag" file enables the section timers.
  FILE *fp;
  if ((fp = fopen("timer.flag", "r")) != NULL) {
    timeron = true;
    t_names[T_init] = "init";
    t_names[T_bench] = "benchmk";
    t_names[T_mg3P] = "mg3P";
    t_names[T_psinv] = "psinv";
    t_names[T_resid] = "resid";
    t_names[T_rprj3] = "rprj3";
    t_names[T_interp] = "interp";
    t_names[T_norm2] = "norm2";
    t_names[T_comm3] = "comm3";
    fclose(fp);
  } else {
    timeron = false;
  }

  printf("\n\n NAS Parallel Benchmarks (NPB3.3-OMP-C) - MG Benchmark\n\n");

  if ((fp = fopen("mg.input.sample", "r")) != NULL) {
    int result;
    printf(" Reading from input file mg.input\n");
    result = fscanf(fp, "%d\n", &lt);
    // Bug fix: this used to print fscanf's return value, not lt.
    printf("lt is %d\n", lt);
    while (fgetc(fp) != '\n');
    result = fscanf(fp, "%d%d%d", &nx[lt], &ny[lt], &nz[lt]);
    // Bug fix: the format string contained invalid "\[" escape sequences.
    printf("nz[lt] is %d %d %d\n", nx[lt], ny[lt], nz[lt]);
    while (fgetc(fp) != '\n');
    result = fscanf(fp, "%d", &nit);
    // Bug fix: "$d" format typo, and it printed fscanf's return, not nit.
    printf("nit is %d\n", nit);
    while (fgetc(fp) != '\n');
    for (i = 0; i <= 7; i++) {
      result = fscanf(fp, "%d", &debug_vec[i]);
      printf("%d\n", debug_vec[i]);
    }
    (void)result;  // returns deliberately unchecked for this trusted sample input
    fclose(fp);
  } else {
    printf(" No input file. Using compiled defaults \n");
    lt = LT_DEFAULT;
    nit = NIT_DEFAULT;
    nx[lt] = NX_DEFAULT;
    ny[lt] = NY_DEFAULT;
    nz[lt] = NZ_DEFAULT;
    for (i = 0; i <= 7; i++) {
      debug_vec[i] = DEBUG_DEFAULT;
    }
  }

  // Classify the problem; non-cubic or unrecognized sizes become 'U'.
  if ( (nx[lt] != ny[lt]) || (nx[lt] != nz[lt]) ) {
    Class = 'U';
  } else if ( nx[lt] == 32 && nit == 4 ) {
    Class = 'S';
  } else if ( nx[lt] == 128 && nit == 4 ) {
    Class = 'W';
  } else if ( nx[lt] == 256 && nit == 4 ) {
    Class = 'A';
  } else if ( nx[lt] == 256 && nit == 20 ) {
    Class = 'B';
  } else if ( nx[lt] == 512 && nit == 20 ) {
    Class = 'C';
  } else if ( nx[lt] == 1024 && nit == 50 ) {
    Class = 'D';
  } else if ( nx[lt] == 2048 && nit == 50 ) {
    Class = 'E';
  } else {
    Class = 'U';
  }
  printf("%d\n", nx[lt]);

  //---------------------------------------------------------------------
  // Use these for debug info:
  //---------------------------------------------------------------------
  // debug_vec(0) = 1 !=> report all norms
  // debug_vec(1) = 1 !=> some setup information
  // debug_vec(1) = 2 !=> more setup information
  // debug_vec(2) = k => at level k or below, show result of resid
  // debug_vec(3) = k => at level k or below, show result of psinv
  // debug_vec(4) = k => at level k or below, show result of rprj
  // debug_vec(5) = k => at level k or below, show result of interp
  // debug_vec(6) = 1 => (unused)
  // debug_vec(7) = 1 => (unused)
  //---------------------------------------------------------------------
  a[0] = -8.0/3.0;
  a[1] = 0.0;
  a[2] = 1.0/6.0;
  a[3] = 1.0/12.0;

  if (Class == 'A' || Class == 'S' || Class =='W') {
    //---------------------------------------------------------------------
    // Coefficients for the S(a) smoother
    //---------------------------------------------------------------------
    c[0] = -3.0/8.0;
    c[1] = +1.0/32.0;
    c[2] = -1.0/64.0;
    c[3] = 0.0;
  } else {
    //---------------------------------------------------------------------
    // Coefficients for the S(b) smoother
    //---------------------------------------------------------------------
    c[0] = -3.0/17.0;
    c[1] = +1.0/33.0;
    c[2] = -1.0/61.0;
    c[3] = 0.0;
  }

  lb = 1;
  k = lt;

  printf("%d\n", lb);
  setup(&n1, &n2, &n3);
  printf("%d\n", lb);
  zero3(u, n1, n2, n3);
  printf("%d\n", lb);
  zran3(v, n1, n2, n3, nx[lt], ny[lt], k);
  printf("%dend!\n", lb);

  norm2u3(v, n1, n2, n3, &rnm2, &rnmu, nx[lt], ny[lt], nz[lt]);

  // printf("\n");
  printf(" norms of random v are\n");
  // printf("%4d%19.2f%19.2e\n", 0, rnm2, rnmu);
  // printf(" about to evaluate resid, k=%d\n", k);

  printf(" Size: %4dx%4dx%4d (class %c)\n", nx[lt], ny[lt], nz[lt], Class);
  printf(" Iterations: %5d\n", nit);
  printf(" Number of available threads: %5d\n", omp_get_max_threads());
  printf("\n");

  resid(u, v, r, n1, n2, n3, a, k);
  norm2u3(r, n1, n2, n3, &rnm2, &rnmu, nx[lt], ny[lt], nz[lt]);
  old2 = rnm2;
  oldu = rnmu;

  //---------------------------------------------------------------------
  // One iteration for startup
  //---------------------------------------------------------------------
  mg3P(u, v, r, a, c, n1, n2, n3);
  resid(u, v, r, n1, n2, n3, a, k);

  // Re-initialize so the timed run starts from the same state as the
  // warm-up run did.
  setup(&n1, &n2, &n3);
  zero3(u, n1, n2, n3);
  zran3(v, n1, n2, n3, nx[lt], ny[lt], k);

  timer_stop(T_init);
  tinit = timer_read(T_init);
  printf(" Initialization time: %15.3f seconds\n\n", tinit);

  for (i = T_bench; i < T_last; i++) {
    timer_clear(i);
  }
  timer_start(T_bench);

  // T_resid2 brackets only the resid calls made directly from main,
  // so T_resid - T_resid2 later isolates the resid time inside mg3P.
  if (timeron) timer_start(T_resid2);
  resid(u, v, r, n1, n2, n3, a, k);
  if (timeron) timer_stop(T_resid2);
  norm2u3(r, n1, n2, n3, &rnm2, &rnmu, nx[lt], ny[lt], nz[lt]);
  old2 = rnm2;
  oldu = rnmu;

  // Drop cached state and open the crash-injection window around the
  // timed iterations.
  flush_whole_cache();
  start_crash();
  int first = 0;
  for (it = 1; it <= nit; it++) {
    // Rewind the counter once when it first reaches 3, presumably to
    // exercise the restart/recovery path -- confirm against my_include.h.
    if(it == 3 && first == 0)
    { it = 1; first = 1;}
    printf("it = %d\n",it);
    if ((it == 1) || (it == nit) || ((it % 5) == 0)) {
      printf("  iter %3d\n", it);
    }
    if (timeron) timer_start(T_mg3P);
    mg3P(u, v, r, a, c, n1, n2, n3);
    if (timeron) timer_stop(T_mg3P);
    if (timeron) timer_start(T_resid2);
    resid(u, v, r, n1, n2, n3, a, k);
    if (timeron) timer_stop(T_resid2);
    // Persist the iteration counter so recovery resumes correctly.
    clwb(&it);
  }
  end_crash();

  norm2u3(r, n1, n2, n3, &rnm2, &rnmu, nx[lt], ny[lt], nz[lt]);

  timer_stop(T_bench);
  t = timer_read(T_bench);

  // Verify the final L2 residual norm against the per-class reference.
  verified = false;
  verify_value = 0.0;

  printf("\n Benchmark completed\n");

  epsilon = 1.0e-8;
  if (Class != 'U') {
    if (Class == 'S') {
      verify_value = 0.5307707005734e-04;
    } else if (Class == 'W') {
      verify_value = 0.6467329375339e-05;
    } else if (Class == 'A') {
      verify_value = 0.2433365309069e-05;
    } else if (Class == 'B') {
      verify_value = 0.1800564401355e-05;
    } else if (Class == 'C') {
      verify_value = 0.5706732285740e-06;
    } else if (Class == 'D') {
      verify_value = 0.1583275060440e-09;
    } else if (Class == 'E') {
      verify_value = 0.5630442584711e-10;
    }

    err = fabs( rnm2 - verify_value ) / verify_value;
    if (err <= epsilon) {
      verified = true;
      printf(" VERIFICATION SUCCESSFUL\n");
      printf(" L2 Norm is %20.13E\n", rnm2);
      printf(" Error is %20.13E\n", err);
    } else {
      verified = false;
      printf(" VERIFICATION FAILED\n");
      printf(" L2 Norm is %20.13E\n", rnm2);
      printf(" The correct L2 Norm is %20.13E\n", verify_value);
    }
  } else {
    verified = false;
    printf(" Problem size unknown\n");
    printf(" NO VERIFICATION PERFORMED\n");
    printf(" L2 Norm is %20.13E\n", rnm2);
  }

  nn = 1.0 * nx[lt] * ny[lt] * nz[lt];

  // 58 is the NPB operation count per grid point per iteration.
  if (t != 0.0) {
    mflops = 58.0 * nit * nn * 1.0e-6 / t;
  } else {
    mflops = 0.0;
  }

  print_results("MG", Class, nx[lt], ny[lt], nz[lt],
                nit, t,
                mflops, " floating point",
                verified, NPBVERSION, COMPILETIME,
                CS1, CS2, CS3, CS4, CS5, CS6, CS7);

  //---------------------------------------------------------------------
  // More timers
  //---------------------------------------------------------------------
  if (timeron) {
    tmax = timer_read(T_bench);
    if (tmax == 0.0) tmax = 1.0;

    printf(" SECTION Time (secs)\n");
    for (i = T_bench; i < T_last; i++) {
      t = timer_read(i);
      if (i == T_resid2) {
        // T_resid counts every resid call; subtracting the direct calls
        // (T_resid2) leaves the resid time spent inside mg3P.
        t = timer_read(T_resid) - t;
        printf(" --> %8s:%9.3f (%6.2f%%)\n", "mg-resid", t, t*100./tmax);
      } else {
        printf(" %-8s:%9.3f (%6.2f%%)\n", t_names[i], t, t*100./tmax);
      }
    }
  }

  return 0;
}
static void setup(int *n1, int *n2, int *n3)
{
int k, j;
int ax, mi[MAXLEVEL+1][3];
int ng[MAXLEVEL+1][3];
ng[lt][0] = nx[lt];
ng[lt][1] = ny[lt];
ng[lt][2] = nz[lt];
for (k = lt-1; k >= 1; k--) {
for (ax = 0; ax < 3; ax++) {
ng[k][ax] = ng[k+1][ax]/2;
}
}
for (k = lt; k >= 1; k--) {
nx[k] = ng[k][0];
ny[k] = ng[k][1];
nz[k] = ng[k][2];
}
for (k = lt; k >= 1; k--) {
for (ax = 0; ax < 3; ax++) {
mi[k][ax] = 2 + ng[k][ax];
}
m1[k] = mi[k][0];
m2[k] = mi[k][1];
m3[k] = mi[k][2];
}
k = lt;
is1 = 2 + ng[k][0] - ng[lt][0];
ie1 = 1 + ng[k][0];
*n1 = 3 + ie1 - is1;
is2 = 2 + ng[k][1] - ng[lt][1];
ie2 = 1 + ng[k][1];
*n2 = 3 + ie2 - is2;
is3 = 2 + ng[k][2] - ng[lt][2];
ie3 = 1 + ng[k][2];
*n3 = 3 + ie3 - is3;
ir[lt] = 0;
for (j = lt-1; j >= 1; j--) {
ir[j] = ir[j+1]+ONE*m1[j+1]*m2[j+1]*m3[j+1];
}
if (debug_vec[1] >= 1) {
printf(" in setup, \n");
printf(" k lt nx ny nz n1 n2 n3 is1 is2 is3 ie1 ie2 ie3\n");
printf("%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d\n",
k,lt,ng[k][0],ng[k][1],ng[k][2],*n1,*n2,*n3,is1,is2,is3,ie1,ie2,ie3);
}
}
//---------------------------------------------------------------------
// multigrid V-cycle routine
//
// One full V-cycle: restrict the residual down to the coarsest level,
// smooth there, then prolongate the correction back up, recomputing
// the residual and smoothing once at each level on the way up.
//---------------------------------------------------------------------
static void mg3P(double u[], double v[], double r[],
                 double a[4], double c[4], int n1, int n2, int n3)
{
  int j, k;

  //---------------------------------------------------------------------
  // down cycle.
  // restrict the residual from the fine grid to the coarse
  //---------------------------------------------------------------------
  for (k = lt; k >= lb+1; k--) {
    j = k - 1;
    // r at level j is r at level k projected by rprj3; levels live at
    // offsets ir[] inside the flat r array.
    rprj3(&r[ir[k]], m1[k], m2[k], m3[k],
          &r[ir[j]], m1[j], m2[j], m3[j], k);
  }

  k = lb;
  //---------------------------------------------------------------------
  // compute an approximate solution on the coarsest grid
  //---------------------------------------------------------------------
  zero3(&u[ir[k]], m1[k], m2[k], m3[k]);
  psinv(&r[ir[k]], &u[ir[k]], m1[k], m2[k], m3[k], c, k);

  for (k = lb+1; k <= lt-1; k++) {
    j = k - 1;
    //---------------------------------------------------------------------
    // prolongate from level k-1  to k
    //---------------------------------------------------------------------
    zero3(&u[ir[k]], m1[k], m2[k], m3[k]);
    interp(&u[ir[j]], m1[j], m2[j], m3[j], &u[ir[k]], m1[k], m2[k], m3[k], k);
    //---------------------------------------------------------------------
    // compute residual for level k
    //---------------------------------------------------------------------
    resid(&u[ir[k]], &r[ir[k]], &r[ir[k]], m1[k], m2[k], m3[k], a, k);
    //---------------------------------------------------------------------
    // apply smoother
    //---------------------------------------------------------------------
    psinv(&r[ir[k]], &u[ir[k]], m1[k], m2[k], m3[k], c, k);
  }

  // Finest level uses the caller's full-size u, v, r directly.
  j = lt - 1;
  k = lt;
  interp(&u[ir[j]], m1[j], m2[j], m3[j], u, n1, n2, n3, k);
  resid(u, v, r, n1, n2, n3, a, k);
  psinv(r, u, n1, n2, n3, c, k);
}
//---------------------------------------------------------------------
// psinv applies an approximate inverse as smoother: u = u + Cr
//
// This implementation costs 15A + 4M per result, where
// A and M denote the costs of Addition and Multiplication.
// Presuming coefficient c(3) is zero (the NPB assumes this,
// but it is thus not a general case), 2A + 1M may be eliminated,
// resulting in 13A + 3M.
// Note that this vectorizes, and is also fine for cache
// based machines.
//---------------------------------------------------------------------
static void psinv(void *or, void *ou, int n1, int n2, int n3,
                  double c[4], int k)
{
  // View the flat buffers as 3-D grids of extent n3 x n2 x n1.
  double (*r)[n2][n1] = (double (*)[n2][n1])or;
  double (*u)[n2][n1] = (double (*)[n2][n1])ou;

  int i3, i2, i1;
  double r1[M], r2[M];  // scratch rows; each thread gets its own (private)

  if (timeron) timer_start(T_psinv);
  #pragma omp parallel for default(shared) private(i1,i2,i3,r1,r2)
  for (i3 = 1; i3 < n3-1; i3++) {
    for (i2 = 1; i2 < n2-1; i2++) {
      // First pass over the row: precompute sums of neighbouring values
      // in the i2/i3 directions (r1: axis neighbours, r2: diagonal
      // neighbours in the i2-i3 plane) so the update loop reuses them.
      for (i1 = 0; i1 < n1; i1++) {
        r1[i1] = r[i3][i2-1][i1] + r[i3][i2+1][i1]
               + r[i3-1][i2][i1] + r[i3+1][i2][i1];
        r2[i1] = r[i3-1][i2-1][i1] + r[i3-1][i2+1][i1]
               + r[i3+1][i2-1][i1] + r[i3+1][i2+1][i1];
      }
      // Second pass: apply the smoother coefficients; interior points only.
      for (i1 = 1; i1 < n1-1; i1++) {
        u[i3][i2][i1] = u[i3][i2][i1]
                      + c[0] * r[i3][i2][i1]
                      + c[1] * ( r[i3][i2][i1-1] + r[i3][i2][i1+1]
                               + r1[i1] )
                      + c[2] * ( r2[i1] + r1[i1-1] + r1[i1+1] );
        //--------------------------------------------------------------------
        // Assume c[3] = 0 (Enable line below if c[3] not= 0)
        //--------------------------------------------------------------------
        // + c[3] * ( r2[i1-1] + r2[i1+1] )
        //--------------------------------------------------------------------
      }
    }
  }
  if (timeron) timer_stop(T_psinv);

  //---------------------------------------------------------------------
  // exchange boundary points
  //---------------------------------------------------------------------
  comm3(u, n1, n2, n3, k);

  if (debug_vec[0] >= 1) {
    rep_nrm(u, n1, n2, n3, " psinv", k);
  }

  if (debug_vec[3] >= k) {
    showall(u, n1, n2, n3);
  }
}
//---------------------------------------------------------------------
// resid computes the residual: r = v - Au
//
// This implementation costs 15A + 4M per result, where
// A and M denote the costs of Addition (or Subtraction) and
// Multiplication, respectively.
// Presuming coefficient a(1) is zero (the NPB assumes this,
// but it is thus not a general case), 3A + 1M may be eliminated,
// resulting in 12A + 3M.
// Note that this vectorizes, and is also fine for cache
// based machines.
//---------------------------------------------------------------------
static void resid(void *ou, void *ov, void *or, int n1, int n2, int n3,
                  double a[4], int k)
{
  // View the flat buffers as 3-D grids of extent n3 x n2 x n1.
  double (*u)[n2][n1] = (double (*)[n2][n1])ou;
  double (*v)[n2][n1] = (double (*)[n2][n1])ov;
  double (*r)[n2][n1] = (double (*)[n2][n1])or;

  int i3, i2, i1;
  double u1[M], u2[M];  // scratch rows; each thread gets its own (private)

  if (timeron) timer_start(T_resid);
  #pragma omp parallel for default(shared) private(i1,i2,i3,u1,u2)
  for (i3 = 1; i3 < n3-1; i3++) {
    for (i2 = 1; i2 < n2-1; i2++) {
      // Precompute per-row sums of neighbouring values in the i2/i3
      // directions (u1: axis neighbours, u2: diagonal neighbours in the
      // i2-i3 plane); the update loop combines them with i1 offsets.
      for (i1 = 0; i1 < n1; i1++) {
        u1[i1] = u[i3][i2-1][i1] + u[i3][i2+1][i1]
               + u[i3-1][i2][i1] + u[i3+1][i2][i1];
        u2[i1] = u[i3-1][i2-1][i1] + u[i3-1][i2+1][i1]
               + u[i3+1][i2-1][i1] + u[i3+1][i2+1][i1];
      }
      // Interior points only; boundaries are fixed up by comm3 below.
      for (i1 = 1; i1 < n1-1; i1++) {
        r[i3][i2][i1] = v[i3][i2][i1]
                      - a[0] * u[i3][i2][i1]
        //-------------------------------------------------------------------
        // Assume a[1] = 0 (Enable 2 lines below if a[1] not= 0)
        //-------------------------------------------------------------------
        // - a[1] * ( u[i3][i2][i1-1] + u[i3][i2][i1+1]
        // + u1[i1] )
        //-------------------------------------------------------------------
                      - a[2] * ( u2[i1] + u1[i1-1] + u1[i1+1] )
                      - a[3] * ( u2[i1-1] + u2[i1+1] );
      }
    }
  }
  if (timeron) timer_stop(T_resid);

  //---------------------------------------------------------------------
  // exchange boundary data
  //---------------------------------------------------------------------
  comm3(r, n1, n2, n3, k);

  if (debug_vec[0] >= 1) {
    rep_nrm(r, n1, n2, n3, " resid", k);
  }

  if (debug_vec[2] >= k) {
    showall(r, n1, n2, n3);
  }
}
//---------------------------------------------------------------------
// rprj3 projects onto the next coarser grid,
// using a trilinear Finite Element projection: s = r' = P r
//
// This implementation costs 20A + 4M per result, where
// A and M denote the costs of Addition and Multiplication.
// Note that this vectorizes, and is also fine for cache
// based machines.
//---------------------------------------------------------------------
static void rprj3(void *or, int m1k, int m2k, int m3k,
                  void *os, int m1j, int m2j, int m3j, int k)
{
  // r: fine-grid input (m3k x m2k x m1k); s: coarse-grid output
  // (m3j x m2j x m1j).
  double (*r)[m2k][m1k] = (double (*)[m2k][m1k])or;
  double (*s)[m2j][m1j] = (double (*)[m2j][m1j])os;

  int j3, j2, j1, i3, i2, i1, d1, d2, d3, j;
  double x1[M], y1[M], x2, y2;

  if (timeron) timer_start(T_rprj3);
  // A fine extent of 3 marks a degenerate dimension: use an index
  // offset of 2 instead of 1 in that direction.
  if (m1k == 3) {
    d1 = 2;
  } else {
    d1 = 1;
  }

  if (m2k == 3) {
    d2 = 2;
  } else {
    d2 = 1;
  }

  if (m3k == 3) {
    d3 = 2;
  } else {
    d3 = 1;
  }

  // Each coarse point (j3,j2,j1) maps to fine point i* = 2*j* - d*.
  #pragma omp parallel for default(shared) \
  private(j1,j2,j3,i1,i2,i3,x1,y1,x2,y2)
  for (j3 = 1; j3 < m3j-1; j3++) {
    i3 = 2*j3-d3;
    for (j2 = 1; j2 < m2j-1; j2++) {
      i2 = 2*j2-d2;
      // Precompute partial neighbour sums along the row (x1: edge-type,
      // y1: corner-type in the i2-i3 plane); the weighting loop below
      // reads them at i1 and i1+2.
      for (j1 = 1; j1 < m1j; j1++) {
        i1 = 2*j1-d1;
        x1[i1] = r[i3+1][i2 ][i1] + r[i3+1][i2+2][i1]
               + r[i3 ][i2+1][i1] + r[i3+2][i2+1][i1];
        y1[i1] = r[i3 ][i2 ][i1] + r[i3+2][i2 ][i1]
               + r[i3 ][i2+2][i1] + r[i3+2][i2+2][i1];
      }

      // Full-weighting stencil: 1/2 centre, 1/4 faces, 1/8 edges,
      // 1/16 corners.
      for (j1 = 1; j1 < m1j-1; j1++) {
        i1 = 2*j1-d1;
        y2 = r[i3 ][i2 ][i1+1] + r[i3+2][i2 ][i1+1]
           + r[i3 ][i2+2][i1+1] + r[i3+2][i2+2][i1+1];
        x2 = r[i3+1][i2 ][i1+1] + r[i3+1][i2+2][i1+1]
           + r[i3 ][i2+1][i1+1] + r[i3+2][i2+1][i1+1];
        s[j3][j2][j1] =
            0.5 * r[i3+1][i2+1][i1+1]
          + 0.25 * (r[i3+1][i2+1][i1] + r[i3+1][i2+1][i1+2] + x2)
          + 0.125 * (x1[i1] + x1[i1+2] + y2)
          + 0.0625 * (y1[i1] + y1[i1+2]);
      }
    }
  }
  if (timeron) timer_stop(T_rprj3);

  j = k-1;
  comm3(s, m1j, m2j, m3j, j);

  if (debug_vec[0] >= 1) {
    rep_nrm(s, m1j, m2j, m3j, " rprj3", k-1);
  }

  if (debug_vec[4] >= k) {
    showall(s, m1j, m2j, m3j);
  }
}
//---------------------------------------------------------------------
// interp adds the trilinear interpolation of the correction
// from the coarser grid to the current approximation: u = u + Qu'
//
// Observe that this implementation costs 16A + 4M, where
// A and M denote the costs of Addition and Multiplication.
// Note that this vectorizes, and is also fine for cache
// based machines. Vector machines may get slightly better
// performance however, with 8 separate "do i1" loops, rather than 4.
//---------------------------------------------------------------------
static void interp(void *oz, int mm1, int mm2, int mm3,
                   void *ou, int n1, int n2, int n3, int k)
{
  // z: coarse-grid correction (mm3 x mm2 x mm1); u: fine grid
  // (n3 x n2 x n1) updated in place.
  double (*z)[mm2][mm1] = (double (*)[mm2][mm1])oz;
  double (*u)[n2][n1] = (double (*)[n2][n1])ou;

  int i3, i2, i1, d1, d2, d3, t1, t2, t3;

  // note that m = 1037 in globals.h but for this only need to be
  // 535 to handle up to 1024^3
  // integer m
  // parameter( m=535 )
  double z1[M], z2[M], z3[M];

  if (timeron) timer_start(T_interp);
  // Regular case: no dimension is degenerate (extent 3).
  if (n1 != 3 && n2 != 3 && n3 != 3) {
    #pragma omp parallel for default(shared) private(i1,i2,i3,z1,z2,z3)
    for (i3 = 0; i3 < mm3-1; i3++) {
      for (i2 = 0; i2 < mm2-1; i2++) {
        // Pre-sum coarse neighbours along the row: z1 (i2 pair),
        // z2 (i3 pair), z3 (i2-i3 quad).
        for (i1 = 0; i1 < mm1; i1++) {
          z1[i1] = z[i3][i2+1][i1] + z[i3][i2][i1];
          z2[i1] = z[i3+1][i2][i1] + z[i3][i2][i1];
          z3[i1] = z[i3+1][i2+1][i1] + z[i3+1][i2][i1] + z1[i1];
        }

        // Four loops update the four fine-grid parities in (i2,i3);
        // even/odd i1 are handled inside each loop. Weights halve with
        // each additional interpolated direction: 1, 1/2, 1/4, 1/8.
        for (i1 = 0; i1 < mm1-1; i1++) {
          u[2*i3][2*i2][2*i1] = u[2*i3][2*i2][2*i1]
                              + z[i3][i2][i1];
          u[2*i3][2*i2][2*i1+1] = u[2*i3][2*i2][2*i1+1]
                                + 0.5 * (z[i3][i2][i1+1] + z[i3][i2][i1]);
        }
        for (i1 = 0; i1 < mm1-1; i1++) {
          u[2*i3][2*i2+1][2*i1] = u[2*i3][2*i2+1][2*i1]
                                + 0.5 * z1[i1];
          u[2*i3][2*i2+1][2*i1+1] = u[2*i3][2*i2+1][2*i1+1]
                                  + 0.25 * (z1[i1] + z1[i1+1]);
        }
        for (i1 = 0; i1 < mm1-1; i1++) {
          u[2*i3+1][2*i2][2*i1] = u[2*i3+1][2*i2][2*i1]
                                + 0.5 * z2[i1];
          u[2*i3+1][2*i2][2*i1+1] = u[2*i3+1][2*i2][2*i1+1]
                                  + 0.25 * (z2[i1] + z2[i1+1]);
        }
        for (i1 = 0; i1 < mm1-1; i1++) {
          u[2*i3+1][2*i2+1][2*i1] = u[2*i3+1][2*i2+1][2*i1]
                                  + 0.25 * z3[i1];
          u[2*i3+1][2*i2+1][2*i1+1] = u[2*i3+1][2*i2+1][2*i1+1]
                                    + 0.125 * (z3[i1] + z3[i1+1]);
        }
      }
    }
  } else {
    // Degenerate case: some dimension has extent 3. d* selects the
    // starting offset and t* the index shift used in that direction.
    if (n1 == 3) {
      d1 = 2;
      t1 = 1;
    } else {
      d1 = 1;
      t1 = 0;
    }

    if (n2 == 3) {
      d2 = 2;
      t2 = 1;
    } else {
      d2 = 1;
      t2 = 0;
    }

    if (n3 == 3) {
      d3 = 2;
      t3 = 1;
    } else {
      d3 = 1;
      t3 = 0;
    }

    #pragma omp parallel default(shared) private(i1,i2,i3)
    {
      // First sweep: fine planes aligned with coarse points in i3
      // (index 2*i3-d3-1).
      #pragma omp for
      for (i3 = d3; i3 <= mm3-1; i3++) {
        for (i2 = d2; i2 <= mm2-1; i2++) {
          for (i1 = d1; i1 <= mm1-1; i1++) {
            u[2*i3-d3-1][2*i2-d2-1][2*i1-d1-1] =
              u[2*i3-d3-1][2*i2-d2-1][2*i1-d1-1]
              + z[i3-1][i2-1][i1-1];
          }
          for (i1 = 1; i1 <= mm1-1; i1++) {
            u[2*i3-d3-1][2*i2-d2-1][2*i1-t1-1] =
              u[2*i3-d3-1][2*i2-d2-1][2*i1-t1-1]
              + 0.5 * (z[i3-1][i2-1][i1] + z[i3-1][i2-1][i1-1]);
          }
        }
        for (i2 = 1; i2 <= mm2-1; i2++) {
          for (i1 = d1; i1 <= mm1-1; i1++) {
            u[2*i3-d3-1][2*i2-t2-1][2*i1-d1-1] =
              u[2*i3-d3-1][2*i2-t2-1][2*i1-d1-1]
              + 0.5 * (z[i3-1][i2][i1-1] + z[i3-1][i2-1][i1-1]);
          }
          for (i1 = 1; i1 <= mm1-1; i1++) {
            u[2*i3-d3-1][2*i2-t2-1][2*i1-t1-1] =
              u[2*i3-d3-1][2*i2-t2-1][2*i1-t1-1]
              + 0.25 * (z[i3-1][i2][i1] + z[i3-1][i2-1][i1]
                      + z[i3-1][i2][i1-1] + z[i3-1][i2-1][i1-1]);
          }
        }
      }

      // Second sweep: fine planes between coarse points in i3 (index
      // 2*i3-t3-1). Writes touch different u planes than the first
      // sweep, so the nowait is safe within this parallel region.
      #pragma omp for nowait
      for (i3 = 1; i3 <= mm3-1; i3++) {
        for (i2 = d2; i2 <= mm2-1; i2++) {
          for (i1 = d1; i1 <= mm1-1; i1++) {
            u[2*i3-t3-1][2*i2-d2-1][2*i1-d1-1] =
              u[2*i3-t3-1][2*i2-d2-1][2*i1-d1-1]
              + 0.5 * (z[i3][i2-1][i1-1] + z[i3-1][i2-1][i1-1]);
          }
          for (i1 = 1; i1 <= mm1-1; i1++) {
            u[2*i3-t3-1][2*i2-d2-1][2*i1-t1-1] =
              u[2*i3-t3-1][2*i2-d2-1][2*i1-t1-1]
              + 0.25 * (z[i3 ][i2-1][i1] + z[i3 ][i2-1][i1-1]
                      + z[i3-1][i2-1][i1] + z[i3-1][i2-1][i1-1]);
          }
        }
        for (i2 = 1; i2 <= mm2-1; i2++) {
          for (i1 = d1; i1 <= mm1-1; i1++) {
            u[2*i3-t3-1][2*i2-t2-1][2*i1-d1-1] =
              u[2*i3-t3-1][2*i2-t2-1][2*i1-d1-1]
              + 0.25 * (z[i3 ][i2][i1-1] + z[i3 ][i2-1][i1-1]
                      + z[i3-1][i2][i1-1] + z[i3-1][i2-1][i1-1]);
          }
          for (i1 = 1; i1 <= mm1-1; i1++) {
            u[2*i3-t3-1][2*i2-t2-1][2*i1-t1-1] =
              u[2*i3-t3-1][2*i2-t2-1][2*i1-t1-1]
              + 0.125 * (z[i3 ][i2][i1 ] + z[i3 ][i2-1][i1 ]
                       + z[i3 ][i2][i1-1] + z[i3 ][i2-1][i1-1]
                       + z[i3-1][i2][i1 ] + z[i3-1][i2-1][i1 ]
                       + z[i3-1][i2][i1-1] + z[i3-1][i2-1][i1-1]);
          }
        }
      }
    } // end parallel
  }
  if (timeron) timer_stop(T_interp);

  if (debug_vec[0] >= 1) {
    rep_nrm(z, mm1, mm2, mm3, "z: inter", k-1);
    rep_nrm(u, n1, n2, n3, "u: inter", k);
  }

  if (debug_vec[5] >= k) {
    showall(z, mm1, mm2, mm3);
    showall(u, n1, n2, n3);
  }
}
//---------------------------------------------------------------------
// norm2u3 evaluates approximations to the L2 norm and the
// uniform (or L-infinity or Chebyshev) norm, under the
// assumption that the boundaries are periodic or zero. Add the
// boundaries in with half weight (quarter weight on the edges
// and eighth weight at the corners) for inhomogeneous boundaries.
//---------------------------------------------------------------------
static void norm2u3(void *or, int n1, int n2, int n3,
                    double *rnm2, double *rnmu,
                    int nx, int ny, int nz)
{
  // Computes rnm2 = sqrt( sum(r_i^2) / (nx*ny*nz) ) over the interior
  // points of r, and rnmu = max|r_i| (the uniform norm).
  double (*r)[n2][n1] = (double (*)[n2][n1])or;
  double s, a;
  int i3, i2, i1;
  double dn, max_rnmu;

  if (timeron) timer_start(T_norm2);
  dn = 1.0*nx*ny*nz;

  s = 0.0;
  max_rnmu = 0.0;

  #pragma omp parallel default(shared) private(i1,i2,i3,a) reduction(+:s)
  {
    // Per-thread running maximum; merged into max_rnmu once per thread below.
    double my_rnmu = 0.0;

    #pragma omp for nowait
    for (i3 = 1; i3 < n3-1; i3++) {
      for (i2 = 1; i2 < n2-1; i2++) {
        for (i1 = 1; i1 < n1-1; i1++) {
          double v = r[i3][i2][i1];
          s = s + v * v;            // v*v instead of pow(v, 2.0): same value, much cheaper
          a = fabs(v);
          my_rnmu = (a > my_rnmu) ? a : my_rnmu;
        }
      }
    }

    // Merge the per-thread maximum. The comparison must be performed
    // inside the critical section: pre-checking max_rnmu outside it is
    // an unsynchronized read racing with other threads' writes.
    #pragma omp critical
    {
      if (my_rnmu > max_rnmu) max_rnmu = my_rnmu;
    }
  } // end parallel

  *rnmu = max_rnmu;
  *rnm2 = sqrt(s / dn);
  if (timeron) timer_stop(T_norm2);
}
//---------------------------------------------------------------------
// report on norm
//---------------------------------------------------------------------
static void rep_nrm(void *u, int n1, int n2, int n3, char *title, int kk)
{
  // Compute and print the L2 and uniform norms of grid `u` at level `kk`.
  double rnm2;
  double rnmu;

  norm2u3(u, n1, n2, n3, &rnm2, &rnmu, nx[kk], ny[kk], nz[kk]);
  printf(" Level%2d in %8s: norms =%21.14E%21.14E\n", kk, title, rnm2, rnmu);
}
//---------------------------------------------------------------------
// comm3 organizes the communication on all borders
//---------------------------------------------------------------------
static void comm3(void *ou, int n1, int n2, int n3, int kk)
{
  // Apply the periodic boundary conditions: every boundary face of the
  // 3-D grid is overwritten with a copy of the opposite interior plane.
  // `kk` (grid level) is not used here but kept for interface parity.
  double (*u)[n2][n1] = (double (*)[n2][n1])ou;
  int i1, i2, i3;

  if (timeron) timer_start(T_comm3);
  #pragma omp parallel default(shared) private(i1,i2,i3)
  {
    // Wrap the x- and y-directions, one i3-plane at a time.
    #pragma omp for
    for (i3 = 1; i3 < n3-1; i3++) {
      // x-direction (fastest-varying index)
      for (i2 = 1; i2 < n2-1; i2++) {
        u[i3][i2][   0] = u[i3][i2][n1-2];
        u[i3][i2][n1-1] = u[i3][i2][   1];
      }
      // y-direction
      for (i1 = 0; i1 < n1; i1++) {
        u[i3][   0][i1] = u[i3][n2-2][i1];
        u[i3][n2-1][i1] = u[i3][   1][i1];
      }
    }

    // Wrap the z-direction; nowait since nothing follows in the region.
    #pragma omp for nowait
    for (i2 = 0; i2 < n2; i2++) {
      for (i1 = 0; i1 < n1; i1++) {
        u[   0][i2][i1] = u[n3-2][i2][i1];
        u[n3-1][i2][i1] = u[   1][i2][i1];
      }
    }
  } // end parallel
  if (timeron) timer_stop(T_comm3);
}
//---------------------------------------------------------------------
// zran3 loads +1 at ten randomly chosen points,
// loads -1 at a different ten random points,
// and zero elsewhere.
//---------------------------------------------------------------------
static void zran3(void *oz, int n1, int n2, int n3, int nx1, int ny1, int k)
{
  // Fill z with +1.0 at the mm grid points holding the largest random
  // values, -1.0 at the mm points holding the smallest, 0.0 elsewhere
  // (see the header comment above the function).
  double (*z)[n2][n1] = (double (*)[n2][n1])oz;
  int i0, mm0, mm1;
  int i1, i2, i3, d1, e1, e2, e3;
  double xx, x0, x1, a1, a2, ai;

  const int mm = 10;                 // number of +1 / -1 charges to place
  const double a = pow(5.0, 13.0);   // multiplier of the randlc/vranlc stream
  const double x = 314159265.0;      // initial seed
  double ten[mm][2], best0, best1;   // per-thread candidates: [..][0] smallest, [..][1] largest
  int i, j1[mm][2], j2[mm][2], j3[mm][2]; // grid coordinates of each candidate
  int jg[4][mm][2];                  // global winners: [0]=owning thread id, [1..3]=(i1,i2,i3)
  double rdummy;
  int myid, num_threads;

  // Seed jumps through the stream: a1 skips one row (nx1 values),
  // a2 skips one plane (nx1*ny1 values).
  a1 = power(a, nx1);
  a2 = power(a, nx1*ny1);

  zero3(z, n1, n2, n3);

  // Offset of this sub-block within the global random sequence.
  i = is1-2+nx1*(is2-2+ny1*(is3-2));

  ai = power(a, i);
  d1 = ie1 - is1 + 1;
  e1 = ie1 - is1 + 2;   // e1 is not used below; kept for symmetry with e2/e3
  e2 = ie2 - is2 + 2;
  e3 = ie3 - is3 + 2;
  x0 = x;
  rdummy = randlc(&x0, ai);

  //---------------------------------------------------------------------
  // save the starting seeds for the following loop
  //---------------------------------------------------------------------
  for (i3 = 1; i3 < e3; i3++) {
    starts[i3] = x0;
    rdummy = randlc(&x0, a2);
  }

  //---------------------------------------------------------------------
  // fill array
  //---------------------------------------------------------------------
  // Each plane i3 starts from its precomputed seed, so planes are filled
  // independently in parallel; vranlc produces one row (d1 values) at a time.
  #pragma omp parallel for default(shared) private(i2,i3,x1,xx,rdummy) \
          shared(e2,e3,d1,a1)
  for (i3 = 1; i3 < e3; i3++) {
    x1 = starts[i3];
    for (i2 = 1; i2 < e2; i2++) {
      xx = x1;
      vranlc(d1, &xx, a, &(z[i3][i2][1]));
      rdummy = randlc(&x1, a1);
    }
  }

  //---------------------------------------------------------------------
  // comm3(z,n1,n2,n3);
  // showall(z,n1,n2,n3);
  //---------------------------------------------------------------------

  //---------------------------------------------------------------------
  // each thread looks for twenty candidates
  //---------------------------------------------------------------------
  #pragma omp parallel default(shared) private(i,i0,i1,i2,i3,j1,j2,j3,ten, \
          myid,num_threads) shared(best0,best1,n1,n2,n3)
  {
    // ten[..][1] keeps this thread's mm largest values in ascending order
    // (slot 0 = weakest candidate); ten[..][0] keeps the mm smallest.
    for (i = 0; i < mm; i++) {
      ten[i][1] = 0.0;
      j1[i][1] = 0;
      j2[i][1] = 0;
      j3[i][1] = 0;
      ten[i][0] = 1.0;
      j1[i][0] = 0;
      j2[i][0] = 0;
      j3[i][0] = 0;
    }

    #pragma omp for
    for (i3 = 1; i3 < n3-1; i3++) {
      double (*zi3)[n1] = z[i3];    // cache the plane pointer
      for (i2 = 1; i2 < n2-1; i2++) {
        for (i1 = 1; i1 < n1-1; i1++) {
          if (zi3[i2][i1] > ten[0][1]) {
            // new "large" candidate: replace the weakest, then re-sort
            ten[0][1] = zi3[i2][i1];
            j1[0][1] = i1;
            j2[0][1] = i2;
            j3[0][1] = i3;
            bubble(ten, j1, j2, j3, mm, 1);
          }
          if (zi3[i2][i1] < ten[0][0]) {
            // new "small" candidate: replace the weakest, then re-sort
            ten[0][0] = zi3[i2][i1];
            j1[0][0] = i1;
            j2[0][0] = i2;
            j3[0][0] = i3;
            bubble(ten, j1, j2, j3, mm, 0);
          }
        }
      }
    }

    //---------------------------------------------------------------------
    // Now which of these are globally best?
    //---------------------------------------------------------------------
    // i1/i0 walk down this thread's candidate lists as its entries win
    // global slots.
    i1 = mm - 1;
    i0 = mm - 1;
    myid = 0;
    myid = omp_get_thread_num();
    num_threads = omp_get_num_threads();
    for (i = mm - 1; i >= 0; i--) {
      // ... ORDERED access is required here for sequential consistency
      // ... in case that two values are identical.
      // ... Since an "ORDERED" section is only defined in OpenMP 2,
      // ... we use a dummy loop to emulate ordered access in OpenMP 1.x.
      // NOTE(review): `master` has no implied barrier; this appears to rely
      // on schedule(static) giving iteration 1 to thread 0, so the ordered
      // clause serializes all threads behind the reset — confirm.
      #pragma omp master
      {
        best1 = 0.0;
        best0 = 1.0;
      }
      #pragma omp for ordered schedule(static)
      for (i2 = 1; i2 <= num_threads; i2++) {
        #pragma omp ordered
        {
          // each thread, in thread order, offers its current top candidates
          if (ten[i1][1] > best1) {
            best1 = ten[i1][1];
            jg[0][i][1] = myid;
          }
          if (ten[i0][0] < best0) {
            best0 = ten[i0][0];
            jg[0][i][0] = myid;
          }
        }
      }
      // The winning thread records the coordinates of its candidate and
      // advances to its next-best one. The implicit barrier at the end of
      // the ordered loop above makes jg[0][i][..] visible to all threads.
      if (myid == jg[0][i][1]) {
        jg[1][i][1] = j1[i1][1];
        jg[2][i][1] = j2[i1][1];
        jg[3][i][1] = j3[i1][1];
        i1 = i1-1;
      }
      if (myid == jg[0][i][0]) {
        jg[1][i][0] = j1[i0][0];
        jg[2][i][0] = j2[i0][0];
        jg[3][i][0] = j3[i0][0];
        i0 = i0-1;
      }
    }
  } // end parallel

  // mm1 = i1+1;
  // mm0 = i0+1;
  mm1 = 0;   // use all mm winners below (the commented-out
  mm0 = 0;   // alternatives above are kept for reference)

  /*
  int cnt = 0;
  printf(" \n");
  printf(" negative charges at\n");
  for (i = 0; i < mm; i++) {
  printf(" (%3d,%3d,%3d)", jg[1][i][0], jg[2][i][0], jg[3][i][0]);
  if (++cnt % 5 == 0) printf("\n");
  }
  cnt = 0;
  printf(" positive charges at\n");
  for (i = 0; i < mm; i++) {
  printf(" (%3d,%3d,%3d)", jg[1][i][1], jg[2][i][1], jg[3][i][1]);
  if (++cnt % 5 == 0) printf("\n");
  }
  cnt = 0;
  printf(" small random numbers were\n");
  for (i = mm-1; i >= 0; i--) {
  printf(" %15.8E", ten[i][0]);
  if (++cnt % 5 == 0) printf("\n");
  }
  cnt = 0;
  printf(" and they were found on processor number\n");
  for (i = mm-1; i >= 0; i--) {
  printf(" %4d", jg[0][i][0]);
  if (++cnt % 10 == 0) printf("\n");
  }
  cnt = 0;
  printf(" large random numbers were\n");
  for (i = mm-1; i >= 0; i--) {
  printf(" %15.8E", ten[i][1]);
  if (++cnt % 5 == 0) printf("\n");
  }
  cnt = 0;
  printf(" and they were found on processor number\n");
  for (i = mm-1; i >= 0; i--) {
  printf(" %4d", jg[0][i][1]);
  if (++cnt % 10 == 0) printf("\n");
  }
  */

  // Wipe the grid, then drop the charges at the winning coordinates.
  #pragma omp parallel for default(shared) private(i1,i2,i3)
  for (i3 = 0; i3 < n3; i3++) {
    for (i2 = 0; i2 < n2; i2++) {
      for (i1 = 0; i1 < n1; i1++) {
        z[i3][i2][i1] = 0.0;
      }
    }
  }
  for (i = mm-1; i >= mm0; i--) {
    z[jg[3][i][0]][jg[2][i][0]][jg[1][i][0]] = -1.0;  // smallest values -> -1
  }
  for (i = mm-1; i >= mm1; i--) {
    z[jg[3][i][1]][jg[2][i][1]][jg[1][i][1]] = +1.0;  // largest values -> +1
  }
  comm3(z, n1, n2, n3, k);   // re-apply the periodic boundaries

  //---------------------------------------------------------------------
  // showall(z,n1,n2,n3);
  //---------------------------------------------------------------------
}
static void showall(void *oz, int n1, int n2, int n3)
{
  // Debug helper: print a clipped view of the grid (at most 18 x 14 x 18
  // entries), one i3-plane at a time. Note the transposed layout: printed
  // rows run over i1, columns over i2.
  double (*z)[n2][n1] = (double (*)[n2][n1])oz;
  const int m1 = min(n1, 18);
  const int m2 = min(n2, 14);
  const int m3 = min(n3, 18);
  int i1, i2, i3;

  printf(" \n");
  for (i3 = 0; i3 < m3; i3++) {
    for (i1 = 0; i1 < m1; i1++) {
      for (i2 = 0; i2 < m2; i2++) {
        printf("%6.3f", z[i3][i2][i1]);
      }
      printf("\n");
    }
    printf(" - - - - - - - \n");
  }
  printf(" \n");
}
//---------------------------------------------------------------------
// power raises an integer, disguised as a double
// precision real, to an integer power
//---------------------------------------------------------------------
static double power(double a, int n)
{
  // Raise seed `a` to the integer power `n` by binary exponentiation,
  // where each "multiply" is one randlc() step of the NPB generator.
  double result = 1.0;
  double aj = a;
  int nj = n;
  double rdummy;

  while (nj != 0) {
    if (nj % 2 == 1) rdummy = randlc(&result, aj);  // fold in the current square
    rdummy = randlc(&aj, aj);                       // square the base
    nj = nj / 2;
  }
  return result;
}
//---------------------------------------------------------------------
// bubble does a bubble sort in direction dir
//---------------------------------------------------------------------
static void bubble(double ten[][2], int j1[][2], int j2[][2], int j3[][2],
                   int m, int ind)
{
  // Sift the freshly inserted element at row 0 of column `ind` towards
  // its sorted position, dragging the coordinate triples (j1,j2,j3)
  // along. ind == 1 keeps the column ascending (largest last); any other
  // ind keeps it descending (smallest last). The walk stops at the first
  // adjacent pair that is already in order.
  int i;

  for (i = 0; i < m-1; i++) {
    int misplaced;
    if (ind == 1) {
      misplaced = (ten[i][ind] > ten[i+1][ind]);
    } else {
      misplaced = (ten[i][ind] < ten[i+1][ind]);
    }
    if (!misplaced) {
      return;
    }

    // swap the values ...
    {
      double val = ten[i][ind];
      ten[i][ind] = ten[i+1][ind];
      ten[i+1][ind] = val;
    }
    // ... and the associated grid coordinates
    {
      int t;
      t = j1[i][ind]; j1[i][ind] = j1[i+1][ind]; j1[i+1][ind] = t;
      t = j2[i][ind]; j2[i][ind] = j2[i+1][ind]; j2[i+1][ind] = t;
      t = j3[i][ind]; j3[i][ind] = j3[i+1][ind]; j3[i+1][ind] = t;
    }
  }
}
static void zero3(void *oz, int n1, int n2, int n3)
{
  // Fill the n3 x n2 x n1 grid with zeros. The grid is stored
  // contiguously, so each i3-plane is a flat run of n2*n1 doubles;
  // parallelise over the outermost (plane) index as the other kernels do.
  double *z = (double *)oz;
  int plane;

  #pragma omp parallel for default(shared) private(plane)
  for (plane = 0; plane < n3; plane++) {
    double *p = z + (long)plane * n2 * n1;
    int idx;
    for (idx = 0; idx < n2 * n1; idx++) {
      p[idx] = 0.0;
    }
  }
}
|
energy.h | #pragma once
#include "bonds.h"
#include "externalpotential.h" // Energybase implemented here
#include "sasa.h"
#include "space.h"
#include "aux/iteratorsupport.h"
#include "aux/pairmatrix.h"
#include <range/v3/range/conversion.hpp>
#include <range/v3/view/iota.hpp>
#include <range/v3/view/subrange.hpp>
#include <Eigen/Dense>
#include <spdlog/spdlog.h>
#include <numeric>
#include <algorithm>
struct freesasa_parameters_fwd; // workaround for freesasa unnamed struct that cannot be forward declared
#if defined(__cpp_lib_parallel_algorithm) && __has_include(<tbb/tbb.h>)
#include <execution>
#endif
#if defined(__cpp_lib_parallel_algorithm) && \
__has_include(<tbb/tbb.h>) && ((defined(__clang__) && __clang_major__ >= 10) || (defined(__GNUC__) && __GNUC__ >= 10))
#define HAS_PARALLEL_TRANSFORM_REDUCE
#endif
namespace Faunus {
namespace ReactionCoordinate {
class ReactionCoordinateBase;
}
namespace Potential {
struct PairPotentialBase;
}
/**
* @par Non-bonded energy
*
 * Several classes (class templates) are used together to allow computation of the change in the non-bonded energy
 * upon a MC move.
*
* The energy change is calculated by the Nonbonded class. It internally uses one of the pairing policies
* to efficiently get all pair interactions affected by the MC move (as described by the Change object).
*
* Pairing policies allow efficient summation of pair energies over the whole system, between groups, inside a group,
* etc. The pairing policy is optimized for performance in a different execution environment, e.g., sequential or
* OMP parallelism.
*
* Policies have direct access to the pair interaction energy functor represented by a simple PairEnergy template.
* Furthermore, the GroupCutoff object is provided to limit free energy computation using a cutoff distance between
* respective groups.
*
* @see Nonbonded, PairingBasePolicy, PairEnergy, GroupCutoff
*/
namespace Energy {
class Hamiltonian;
/**
* @brief Check if particles are outside the simulation container
*
 * If any particle is outside, infinite energy is returned; zero otherwise.
* This is not needed for cuboidal geometry as particles are always wrapped using PBC.
*/
class ContainerOverlap : public Energybase {
  private:
    const Space& spc; //!< space holding the groups/particles to be checked
    //! True if any particle of the changed group lies outside the container geometry
    bool groupIsOutsideContainer(const Change::GroupChange& group_change) const;
    //! Overlap energy summed over all groups; presumably used for full-system evaluation — confirm in .cpp
    double energyOfAllGroups() const;

  public:
    explicit ContainerOverlap(const Space& spc);
    //! Infinite energy if an affected particle is outside; zero otherwise (see class docs)
    double energy(Change& change) override;
};
/**
* @brief Data class for Ewald k-space calculations
*
* Currently, the Eigen policies map to the non-eigen
* variants, e.g. `PBCEigen == PBC`.
*
* Related reading:
* - PBC Ewald (DOI:10.1063/1.481216)
* - IPBC Ewald (DOI:10/css8)
* - Update optimization (DOI:10.1063/1.481216, Eq. 24)
*/
struct EwaldData {
    typedef std::complex<double> Tcomplex;
    Eigen::Matrix3Xd k_vectors;             //!< k-vectors, 3xK
    Eigen::VectorXd Aks;                    //!< 1xK for update optimization (see Eq.24, DOI:10.1063/1.481216)
    Eigen::VectorXcd Q_ion, Q_dipole;       //!< Complex 1xK vectors
    double r_cutoff = 0;                    //!< Real-space cutoff
    double n_cutoff = 0;                    //!< Inverse space cutoff
    double surface_dielectric_constant = 0; //!< Surface dielectric constant
    double bjerrum_length = 0;              //!< Bjerrum length
    double kappa = 0;                       //!< Inverse Debye screening length
    double kappa_squared = 0;               //!< Squared inverse Debye screening length
    double alpha = 0;                       //!< NOTE(review): presumably the Ewald splitting parameter — confirm in EwaldData(json)
    double const_inf = 0;                   //!< NOTE(review): likely the surface/"epsilon at infinity" switch — confirm in .cpp
    double check_k2_zero = 0;               //!< NOTE(review): presumably a threshold for detecting k^2 ~ 0 — confirm in .cpp
    bool use_spherical_sum = true;          //!< NOTE(review): presumably selects a spherical k-space cutoff — confirm
    int num_kvectors = 0;                   //!< Number of k-vectors
    Point box_length = {0.0, 0.0, 0.0};     //!< Box dimensions
    enum Policies { PBC, PBCEigen, IPBC, IPBCEigen, INVALID }; //!< Possible k-space updating schemes
    Policies policy = PBC;                  //!< Policy for updating k-space
    EwaldData(const json &);                //!< Initialize from json
};
NLOHMANN_JSON_SERIALIZE_ENUM(EwaldData::Policies, {
{EwaldData::INVALID, nullptr},
{EwaldData::PBC, "PBC"},
{EwaldData::PBCEigen, "PBCEigen"},
{EwaldData::IPBC, "IPBC"},
{EwaldData::IPBCEigen, "IPBCEigen"},
})
void to_json(json &, const EwaldData &);
/**
* @brief Base class for Ewald k-space updates policies
*/
class EwaldPolicyBase {
  public:
    std::string cite; //!< Optional reference, preferably DOI, to further information
    virtual ~EwaldPolicyBase() = default;
    virtual void updateBox(EwaldData &, const Point &) const = 0; //!< Prepare k-vectors according to given box vector
    virtual void updateComplex(EwaldData&,
                               Space::GroupVector&) const = 0; //!< Update all k vectors
    virtual void updateComplex(EwaldData&, Change&, Space::GroupVector&,
                               Space::GroupVector&) const = 0; //!< Update subset of k vectors. Require `old` pointer
    virtual double selfEnergy(const EwaldData&, Change&,
                              Space::GroupVector&) = 0; //!< Self energy contribution due to a change
    virtual double surfaceEnergy(const EwaldData&, Change&,
                                 Space::GroupVector&) = 0; //!< Surface energy contribution due to a change
    virtual double reciprocalEnergy(const EwaldData &) = 0; //!< Total reciprocal energy

    /**
     * @brief Represent charges and positions using an Eigen facade (Map)
     *
     * Requires that all groups are fully active, i.e. does not work for GCMC.
     *
     * @param groups Vector of groups to represent
     * @return tuple with positions, charges
     */
    auto mapGroupsToEigen(Space::GroupVector& groups) const {
        // a group with free capacity has inactive particles, which would
        // break the single contiguous range the Eigen map assumes
        for (auto &g : groups)
            if (g.size() != g.capacity())
                throw std::runtime_error("Eigen optimized Ewald not available with inactive groups");
        auto first_particle = groups.front().begin();
        auto last_particle = groups.back().end();
        auto pos = asEigenMatrix(first_particle, last_particle,
                                 &Particle::pos); // N x 3
        auto charge = asEigenVector(first_particle, last_particle,
                                    &Particle::charge); // N x 1
        return std::make_tuple(pos, charge);
    }
    static std::shared_ptr<EwaldPolicyBase> makePolicy(EwaldData::Policies); //!< Policy factory
};
/**
* @brief Ion-Ion Ewald using periodic boundary conditions (PBC)
*/
//! Reference (non-vectorized) PBC implementation of the Ewald policy interface
struct PolicyIonIon : public EwaldPolicyBase {
    PolicyIonIon();
    void updateBox(EwaldData &, const Point &) const override;
    void updateComplex(EwaldData&, Space::GroupVector&) const override; //!< Update all k vectors
    void updateComplex(EwaldData&, Change&, Space::GroupVector&, Space::GroupVector&) const override; //!< Partial update; needs the old groups
    double selfEnergy(const EwaldData&, Change&, Space::GroupVector&) override;
    double surfaceEnergy(const EwaldData&, Change&, Space::GroupVector&) override;
    double reciprocalEnergy(const EwaldData &) override;
};
/**
* @brief Ion-Ion Ewald with periodic boundary conditions (PBC) using Eigen
* operations
* @warning Will not work with Space with inactive particles (GCMC, for example)
*
* For compilers that offer good vectorization (gcc on linux) this brings a 4-5
* fold speed increase.
* Status on February, 2020:
* - Clang9: Eigen version is slower than generic version (macos/ubuntu)
 * - GCC9: Eigen is 4-5 times faster on x86 linux; ~1.5 times slower on macos.
*/
struct PolicyIonIonEigen : public PolicyIonIon {
    using PolicyIonIon::updateComplex; // keep the partial-update overload visible
    void updateComplex(EwaldData&, Space::GroupVector&) const override; //!< Eigen-based full update (requires all groups fully active)
    double reciprocalEnergy(const EwaldData &) override;
};
/**
* @brief Ion-Ion Ewald with isotropic periodic boundary conditions (IPBC)
*/
struct PolicyIonIonIPBC : public PolicyIonIon {
    using PolicyIonIon::updateComplex; // keep the partial-update overload visible
    PolicyIonIonIPBC();
    void updateBox(EwaldData &, const Point &) const override; //!< IPBC k-vector setup
    void updateComplex(EwaldData&, Space::GroupVector&) const override;
    void updateComplex(EwaldData&, Change&, Space::GroupVector&, Space::GroupVector&) const override;
};
/**
* @brief Ion-Ion Ewald with isotropic periodic boundary conditions (IPBC) using Eigen operations
* @warning Incomplete and under construction
*/
struct PolicyIonIonIPBCEigen : public PolicyIonIonIPBC {
    using PolicyIonIonIPBC::updateComplex; // keep the partial-update overload visible
    void updateComplex(EwaldData&, Space::GroupVector&) const override; //!< Eigen-based IPBC full update (incomplete, see class docs)
};
/** @brief Ewald summation reciprocal energy */
class Ewald : public Energybase {
  private:
    EwaldData data;                          //!< k-space state and parameters
    std::shared_ptr<EwaldPolicyBase> policy; //!< Policy for updating k-space
    Space &spc;
    Space::GroupVector* old_groups = nullptr; //!< groups of the reference state; presumably set during sync() — confirm in .cpp

  public:
    Ewald(const json &, Space &);
    void init() override;
    double energy(Change &) override;
    void sync(Energybase*,
              const Change&) override; //!< Called after a move is rejected/accepted
                                       //! as well as before simulation
    void to_json(json &) const override;
    void force(std::vector<Point> &) override; // update forces on all particles
};
/**
* @brief Pressure term for NPT ensemble
*/
class Isobaric : public Energybase {
  private:
    const Space& spc;
    double pressure = 0.0; //!< Applied pressure
    static const std::map<std::string, double> pressure_units; //!< Possible ways pressure can be given

  public:
    Isobaric(const json& j, const Space& spc);
    double energy(Change& change) override; //!< NPT pressure contribution (see class docs)
    void to_json(json& j) const override;
};
/**
* @brief Constrain system using reaction coordinates
*
* If outside specified `range`, infinity energy is returned, causing rejection.
*/
class Constrain : public Energybase {
  private:
    std::string type; //!< name/type of the reaction coordinate
    std::shared_ptr<ReactionCoordinate::ReactionCoordinateBase> rc = nullptr; //!< the constrained coordinate

  public:
    Constrain(const json &, Space &);
    double energy(Change &) override; //!< infinite energy when outside the allowed range (see class docs)
    void to_json(json &) const override;
};
/**
* The keys of the `intra` map are group index and the values
* is a vector of `BondData`. For bonds between groups, fill
* in `inter` which is evaluated for every update of call to
* `energy`.
*
* @todo Optimize.
*/
class Bonded : public Energybase {
  private:
    using BondVector = BasePointerVector<Potential::BondData>;
    const Space& spc;
    BondVector external_bonds;                //!< inter-molecular bonds
    std::map<int, BondVector> internal_bonds; //!< intra-molecular bonds; key is group index
    void updateGroupBonds(const Space::GroupType& group); //!< Update/set bonds internally in group
    void updateInternalBonds(); //!< finds and adds all intra-molecular bonds of active molecules
    double sumBondEnergy(const BondVector& bonds) const; //!< sum energy in vector of BondData
    double internalGroupEnergy(const Change::GroupChange& changed); //!< Energy from internal bonds
    //! Sum energies of the bonds touching the given (sorted) particle indices
    template <typename Indices> double sum_energy(const BondVector& bonds, const Indices& particle_indices) const;

  public:
    Bonded(const Space& spc, const BondVector& external_bonds);
    Bonded(const json& j, const Space& spc);
    void to_json(json& j) const override;
    double energy(Change& change) override; //!< brute force -- refine this!
    void force(std::vector<Point>& forces) override; //!< Calculates the forces on all particles
};
/**
* @brief Sum energy in vector of BondData for matching particle indices
* @param bonds List of bonds
* @param particle_indices Particle index
*
* To speed up the bond search, the given indices must be ordered which allows
* for binary search which on large systems provides superior performance compared
* to simplistic search which scales as number_of_bonds x number_of_moved_particles
*/
template <typename Indices>
double Bonded::sum_energy(const Bonded::BondVector& bonds, const Indices& particle_indices) const {
    // precondition: indices must be sorted for the binary_search below
    assert(std::is_sorted(particle_indices.begin(), particle_indices.end()));
    auto bond_filter = [&](const auto& bond) { // determine if bond is part of indices of particles
        for (const auto bond_particle_index : bond->indices) {
            if (std::binary_search(particle_indices.begin(), particle_indices.end(), bond_particle_index)) {
                return true;
            }
        }
        return false;
    };
    // lazily select only the bonds that touch one of the moved particles
    auto affected_bonds = bonds | ranges::cpp20::views::filter(bond_filter);
    auto bond_energy = [&](const auto& bond) { return bond->energyFunc(spc.geometry.getDistanceFunc()); };
#if (defined(__clang__) && __clang_major__ >= 10) || (defined(__GNUC__) && __GNUC__ >= 10)
    // single-pass sum over the lazy filter view
    return std::transform_reduce(affected_bonds.begin(), affected_bonds.end(), 0.0, std::plus<>(), bond_energy);
#else
    // fallback for older compilers
    double energy = 0.0;
    for (const auto& bond : affected_bonds) {
        energy += bond_energy(bond);
    }
    return energy;
#endif
}
/**
* @brief Provides a complementary set of ints with respect to the iota set of a given size.
* @remark It is used as a helper function for pair interactions.
*
* @tparam TSize a number castable to int
* @tparam TSet a finite iterable container on ints
* @param size the iota superset contains all integers in the range [0, size)
* @param set an original set of integers
* @return a set of ints complementary to the original set
*/
template <typename TSize, typename TSet> inline auto indexComplement(const TSize size, const TSet &set) {
    // guard the narrowing conversion below
    assert(size <= std::numeric_limits<int>::max());
    // predicate is copied into the view, so returning the lazy range is safe
    auto is_member = [&set](TSize i) { return std::binary_search(set.begin(), set.end(), i); };
    auto all_indices = ranges::views::ints(0, static_cast<int>(size));
    return all_indices | ranges::views::remove_if(is_member);
}
/**
* @brief Interface for energy accumulators
*
* The energy accumulator is used to add up energies between two particles.
* This can be done instantly (see `InstantEnergyAccumulator`) or delaying
* the evaluation until the energy is needed (`DelayedEnergyAccumulator`).
* The latter may be used with parallelism.
*
* @todo See https://www.youtube.com/watch?v=3LsRYnRDSRA for a bizarre example
* where a custom `struct Tpair { const Particle &first, second; };`
* outperforms `std::pair` due to missed compiler optimization.
*/
class EnergyAccumulatorBase {
  protected:
    double value = 0.0;                                               //!< accumulated energy
    using ParticleRef = const std::reference_wrapper<const Particle>; //!< Particle reference
    using ParticlePair = std::pair<ParticleRef, ParticleRef>;         //!< References to two particles

  public:
    enum class Scheme { SERIAL, OPENMP, PARALLEL, INVALID };
    Scheme scheme = Scheme::SERIAL; //!< summation scheme in use
    EnergyAccumulatorBase(double value);
    virtual ~EnergyAccumulatorBase() = default;
    virtual void reserve(size_t number_of_particles); //!< hint for pre-allocating internal buffers
    virtual void clear();                             //!< reset the accumulated energy
    virtual void from_json(const json &j);
    virtual void to_json(json &j) const;
    virtual explicit operator double();               //!< finalize and return the accumulated energy
    virtual EnergyAccumulatorBase& operator=(double new_value) = 0;
    virtual EnergyAccumulatorBase& operator+=(double new_value) = 0;
    virtual EnergyAccumulatorBase& operator+=(ParticlePair&& pair) = 0; //!< add the energy of a particle pair

    //! Merge another accumulator into this one (forces its evaluation via the double conversion)
    template <typename TOtherAccumulator> inline EnergyAccumulatorBase& operator+=(TOtherAccumulator& acc) {
        value += static_cast<double>(acc);
        return *this;
    }
};
NLOHMANN_JSON_SERIALIZE_ENUM(EnergyAccumulatorBase::Scheme, {{EnergyAccumulatorBase::Scheme::INVALID, nullptr},
{EnergyAccumulatorBase::Scheme::SERIAL, "serial"},
{EnergyAccumulatorBase::Scheme::OPENMP, "openmp"},
{EnergyAccumulatorBase::Scheme::PARALLEL, "parallel"}})
/**
* @brief A basic accumulator which immediately computes and adds energy of a pair of particles upon addition using
* the PairEnergy templated class.
*
* Generally this is the original way how the pairwise nonbonded energy has been computed in Faunus. Due to compiler
* optimization, templated class method 'PairEnergy.potential' may be inlined to significantly improve performance.
*
* @tparam PairEnergy pair energy implementing a potential(a, b) method for particles a and b
*/
template <typename PairEnergy> class InstantEnergyAccumulator : public EnergyAccumulatorBase {
  private:
    const PairEnergy& pair_energy; //!< recipe to compute non-bonded energy between two particles, see PairEnergy

  public:
    InstantEnergyAccumulator(const PairEnergy& pair_energy, const double value = 0.0)
        : EnergyAccumulatorBase(value), pair_energy(pair_energy) {}

    inline InstantEnergyAccumulator& operator=(const double new_value) override {
        value = new_value;
        return *this;
    }

    inline InstantEnergyAccumulator& operator+=(const double new_value) override {
        value += new_value;
        return *this;
    }

    //! Immediately evaluate and add the pair energy (no buffering)
    inline InstantEnergyAccumulator& operator+=(ParticlePair&& pair) override {
        // keep this short to get inlined
        value += pair_energy.potential(pair.first.get(), pair.second.get());
        return *this;
    }

    //! Only the serial scheme is supported; others are rejected with a warning
    void from_json(const json &j) override {
        EnergyAccumulatorBase::from_json(j);
        if (scheme != Scheme::SERIAL) {
            faunus_logger->warn("unsupported summation scheme; falling back to 'serial'");
        }
    }
};
/**
* Stores a vector of particle pairs and postpones the energy evaluation until
* `operator double()` is called. Looping over the vector can be done in serial (as a fallback);
* using OpenMP; or using C++17 parallel algorithms if available.
*/
template <typename PairEnergy> class DelayedEnergyAccumulator : public EnergyAccumulatorBase {
private:
std::vector<ParticlePair> particle_pairs;
const PairEnergy& pair_energy; //!< recipe to compute non-bonded energy between two particles, see PairEnergy
const size_t max_particles_in_buffer = 10000; //!< this can be modified to suit memory requirements
public:
explicit DelayedEnergyAccumulator(const PairEnergy& pair_energy, const double value = 0.0)
: EnergyAccumulatorBase(value), pair_energy(pair_energy) {}
/** Reserve memory for (N-1)*N/2 interaction pairs */
void reserve(size_t number_of_particles) override {
try {
number_of_particles = std::min(number_of_particles, max_particles_in_buffer);
const auto number_of_pairs = (number_of_particles - 1U) * number_of_particles / 2U;
faunus_logger->debug(fmt::format("reserving memory for {} energy pairs ({} MB)", number_of_pairs,
number_of_pairs * sizeof(ParticlePair) / (1024U * 1024U)));
particle_pairs.reserve(number_of_pairs);
} catch (std::exception& e) {
throw std::runtime_error(
fmt::format("cannot allocate memory for energy pairs: {}. Use another summation policy.", e.what()));
}
}
void clear() override {
value = 0.0;
particle_pairs.clear();
}
DelayedEnergyAccumulator& operator=(const double new_value) override {
clear();
value = new_value;
return *this;
}
inline DelayedEnergyAccumulator& operator+=(const double new_value) override {
value += new_value;
return *this;
}
inline DelayedEnergyAccumulator& operator+=(ParticlePair&& pair) override {
assert(particle_pairs.capacity() > 0);
if (particle_pairs.size() == particle_pairs.capacity()) {
operator double(); // sum stored pairs and reset buffer
}
particle_pairs.template emplace_back(std::move(pair));
return *this;
}
explicit operator double() override {
switch (scheme) {
case Scheme::OPENMP:
value += accumulateOpenMP();
break;
case Scheme::PARALLEL:
value += accumulateParallel();
break;
default:
value += accumulateSerial();
}
particle_pairs.clear();
return value;
}
private:
double accumulateSerial() const {
double sum = 0.0;
for (const auto [particle1, particle2] : particle_pairs) {
sum += pair_energy.potential(particle1.get(), particle2.get());
}
return sum;
}
double accumulateParallel() const {
#if defined(HAS_PARALLEL_TRANSFORM_REDUCE)
return std::transform_reduce(
std::execution::par, particle_pairs.cbegin(), particle_pairs.cend(), 0.0, std::plus<double>(),
[&](const auto& pair) { return pair_energy.potential(pair.first.get(), pair.second.get()); });
#else
return accumulateSerial(); // fallback
#endif
}
double accumulateOpenMP() const {
double sum = 0.0;
#pragma omp parallel for reduction(+ : sum)
for (const auto& pair : particle_pairs) {
sum += pair_energy.potential(pair.first.get(), pair.second.get());
}
return sum;
}
};
template <typename TPairEnergy>
std::shared_ptr<EnergyAccumulatorBase> createEnergyAccumulator(const json& j, const TPairEnergy& pair_energy,
                                                               double initial_value) {
    // Factory: pick instant (serial) or delayed (buffered) summation based
    // on the "summation_policy" json key, then let the accumulator read the
    // remaining settings itself.
    std::shared_ptr<EnergyAccumulatorBase> accumulator;
    const auto scheme = j.value("summation_policy", EnergyAccumulatorBase::Scheme::SERIAL);
    if (scheme == EnergyAccumulatorBase::Scheme::SERIAL) {
        accumulator = std::make_shared<InstantEnergyAccumulator<TPairEnergy>>(pair_energy, initial_value);
        faunus_logger->debug("activated instant energy summation");
    } else {
        accumulator = std::make_shared<DelayedEnergyAccumulator<TPairEnergy>>(pair_energy, initial_value);
        faunus_logger->debug("activated delayed energy summation");
    }
    accumulator->from_json(j);
    return accumulator;
}
/**
* @brief Determines if two groups are separated beyond the cutoff distance.
*
* The distance between centers of mass is considered. The cutoff distance can be specified independently for each
* group pair to override the default value.
*
* @see GroupPairingPolicy
*/
class GroupCutoff {
    double default_cutoff_squared = pc::max_value; //!< fallback cutoff²; effectively "no cutoff" unless overridden
    PairMatrix<double> cutoff_squared;  //!< matrix with group-to-group cutoff distances squared in angstrom squared
    double total_cnt = 0, skip_cnt = 0; //!< statistics: cut() calls vs. pairs beyond the cutoff
    Space::GeometryType& geometry;      //!< geometry to compute the inter group distance with
    friend void from_json(const json&, GroupCutoff &);
    friend void to_json(json&, const GroupCutoff &);
    void setSingleCutoff(const double cutoff); //!< presumably applies one cutoff to all group pairs — confirm in .cpp

  public:
    /**
     * @brief Determines if two groups are separated beyond the cutoff distance.
     * @return true if the group-to-group distance is beyond the cutoff distance, false otherwise
     */
    inline bool cut(const Space::GroupType& group1, const Space::GroupType& group2) {
        bool result = false;
        ++total_cnt;
        if (group1.isMolecular() && group2.isMolecular() // atomic groups have no meaningful cm
            && geometry.sqdist(group1.mass_center, group2.mass_center) >= cutoff_squared(group1.id, group2.id)) {
            result = true;
            ++skip_cnt;
        }
        return result;
    }

    double getCutoff(size_t id1, size_t id2) const; //!< cutoff for a pair of group ids

    /**
     * @brief A functor alias for cut().
     * @see cut()
     */
    template <typename... Args>
    inline auto operator()(Args &&... args) { return cut(std::forward<Args>(args)...); }

    /**
     * @brief Sets the geometry.
     * @param geometry geometry to compute the inter group distance with
     */
    GroupCutoff(Space::GeometryType& geometry);
};
void from_json(const json&, GroupCutoff &);
void to_json(json&, const GroupCutoff &);
/**
* @brief Provides a fast inlineable interface for non-bonded pair potential energy computation.
*
* @tparam TPairPotential a pair potential to compute with
* @tparam allow_anisotropic_pair_potential pass also a distance vector to the pair potential, slower
*/
template <typename TPairPotential, bool allow_anisotropic_pair_potential = true> class PairEnergy {
const Space::GeometryType& geometry; //!< geometry to operate with
TPairPotential pair_potential; //!< pair potential function/functor
Space &spc; //!< space to init ParticleSelfEnergy with addPairPotentialSelfEnergy
BasePointerVector<Energybase> &potentials; //!< registered non-bonded potentials, see addPairPotentialSelfEnergy
public:
/**
* @param spc
* @param potentials registered non-bonded potentials
*/
PairEnergy(Space& spc, BasePointerVector<Energybase>& potentials)
: geometry(spc.geometry), spc(spc), potentials(potentials) {}
/**
* @brief Computes pair potential energy.
*
* @param a particle
* @param b particle
* @return pair potential energy between particles a and b
*/
template <typename T>
inline double potential(const T &a, const T &b) const {
assert(&a != &b); // a and b cannot be the same particle
if constexpr (allow_anisotropic_pair_potential) {
const Point r = geometry.vdist(a.pos, b.pos);
return pair_potential(a, b, r.squaredNorm(), r);
} else {
return pair_potential(a, b, geometry.sqdist(a.pos, b.pos), {0, 0, 0});
}
}
// just a temporary placement until PairForce class template will be implemented
template <typename ParticleType> inline Point force(const ParticleType& a, const ParticleType& b) const {
assert(&a != &b); // a and b cannot be the same particle
const Point b_towards_a = geometry.vdist(a.pos, b.pos); // vector b -> a = a - b
return pair_potential.force(a, b, b_towards_a.squaredNorm(), b_towards_a);
}
/**
* @brief A functor alias for potential().
* @see potential()
*/
template <typename... Args>
inline auto operator()(Args &&... args) {
return potential(std::forward<Args>(args)...);
}
/**
* @brief Registers the potential self-energy to hamiltonian if needed.
* @see Hamiltonian::Hamiltonian
*/
void addPairPotentialSelfEnergy() {
if (pair_potential.selfEnergy) { // only add if self energy is defined
faunus_logger->debug("Adding self-energy from {} to hamiltonian", pair_potential.name);
potentials.emplace_back<Energy::ParticleSelfEnergy>(spc, pair_potential.selfEnergy);
}
}
void from_json(const json &j) {
pair_potential.from_json(j);
if (!pair_potential.isotropic && !allow_anisotropic_pair_potential) {
throw std::logic_error("Only isotropic pair potentials are allowed.");
}
addPairPotentialSelfEnergy();
}
void to_json(json &j) const { pair_potential.to_json(j); }
};
/**
 * @brief Particle pairing to calculate pairwise interactions using particles' groups internally. Depending on
* the accumulator provided, raw particle pairs, energy sum, etc. can be obtained.
*
* Accumulator is used as the first argument in all methods. Accumulator shall overload '+=' operator to accept a pair
* of particle references as used in particle2particle method.
*
* @remark Method arguments are generally not checked for correctness because of performance reasons.
*
* @tparam TCutoff a cutoff scheme between groups
* @see InstantEnergyAccumulator, GroupCutoff
*/
template <typename TCutoff>
class GroupPairingPolicy {
  protected:
    const Space &spc; //!< a space to operate on
    TCutoff cut;      //!< a cutoff functor that determines if energy between two groups can be ignored

  public:
    /**
     * @param spc
     */
    GroupPairingPolicy(Space& spc) : spc(spc), cut(spc.geometry) {}

    void from_json(const json &j) {
        Energy::from_json(j, cut);
    }

    void to_json(json &j) const {
        Energy::to_json(j, cut);
    }

    /**
     * @brief Add two interacting particles to the accumulator.
     *
     * Due to compiler optimization, the '+=' operator and this function itself may be inlined to significantly
     * improve performance.
     *
     * @tparam TAccumulator an accumulator with '+=' operator overloaded to add a pair of particles as references
     * {T&, T&}
     * @tparam T an interacting particle
     * @param pair_accumulator accumulator of interacting pairs of particles
     * @param a first particle
     * @param b second particle
     */
    template <typename TAccumulator, typename T>
    inline void particle2particle(TAccumulator &pair_accumulator, const T &a, const T &b) const {
        pair_accumulator += {std::cref(a), std::cref(b)};
    }

    /**
     * @brief All pairings within a group.
     *
     * All pair interaction within the group are accumulated. The pair exclusions defined in the molecule
     * topology are honoured.
     *
     * @tparam TAccumulator an accumulator with '+=' operator overloaded to add a pair of particles as references
     * {T&, T&}
     * @tparam TGroup
     * @param group
     * @param pair_accumulator accumulator of interacting pairs of particles
     */
    template <typename TAccumulator, typename TGroup>
    void groupInternal(TAccumulator &pair_accumulator, const TGroup &group) {
        const auto &moldata = group.traits();
        if (!moldata.rigid) {
            const int group_size = group.size();
            for (int i = 0; i < group_size - 1; ++i) {
                for (int j = i + 1; j < group_size; ++j) {
                    // This compound condition is faster than an outer atomic condition;
                    // tested on bulk example in GCC 9.2.
                    if (group.isAtomic() || !moldata.isPairExcluded(i, j)) {
                        particle2particle(pair_accumulator, group[i], group[j]);
                    }
                }
            }
        }
    }

    /**
     * @brief Pairings of a single particle within the group.
     *
     * The pair exclusions defined in the molecule topology are honoured.
     *
     * @tparam TAccumulator an accumulator with '+=' operator overloaded to add a pair of particles as references
     * {T&, T&}
     * @tparam TGroup
     * @param pair_accumulator accumulator of interacting pairs of particles
     * @param group
     * @param index internal index of the selected particle within the group
     */
    template <typename TAccumulator, typename TGroup>
    void groupInternal(TAccumulator& pair_accumulator, const TGroup& group, const std::size_t index) {
        const auto &moldata = group.traits();
        if (!moldata.rigid) {
            // std::size_t counters avoid sign-compare warnings and narrowing of `index + 1` into int
            const auto group_size = static_cast<std::size_t>(group.size());
            if (group.isAtomic()) {
                // speed optimization: non-bonded interaction exclusions do not need to be checked for atomic groups
                for (std::size_t i = 0; i < index; ++i) {
                    particle2particle(pair_accumulator, group[index], group[i]);
                }
                for (std::size_t i = index + 1; i < group_size; ++i) {
                    particle2particle(pair_accumulator, group[index], group[i]);
                }
            } else {
                // molecular group
                for (std::size_t i = 0; i < index; ++i) {
                    if (!moldata.isPairExcluded(index, i)) {
                        particle2particle(pair_accumulator, group[index], group[i]);
                    }
                }
                for (std::size_t i = index + 1; i < group_size; ++i) {
                    if (!moldata.isPairExcluded(index, i)) {
                        particle2particle(pair_accumulator, group[index], group[i]);
                    }
                }
            }
        }
    }

    /**
     * @brief Pairing in the group involving only the particles present in the index.
     *
     * Only such non-bonded pair interactions within the group are considered if at least one particle is present
     * in the index. The pair exclusions defined in the molecule topology are honoured.
     *
     * @tparam TAccumulator an accumulator with '+=' operator overloaded to add a pair of particles as references
     * {T&, T&}
     * @tparam TGroup
     * @tparam TIndex
     * @param pair_accumulator accumulator of interacting pairs of particles
     * @param group
     * @param index internal indices of particles within the group
     */
    template <typename TAccumulator, typename TGroup, typename TIndex>
    void groupInternal(TAccumulator &pair_accumulator, const TGroup &group, const TIndex &index) {
        auto &moldata = group.traits();
        if (!moldata.rigid) {
            if (index.size() == 1) {
                groupInternal(pair_accumulator, group, index[0]);
            } else {
                // TODO investigate overhead of `index_complement` filtering;
                // TODO perhaps allow different strategies based on the index-size/group-size ratio
                auto index_complement = indexComplement(group.size(), index);
                // moved <-> static
                for (int i : index) {
                    for (int j : index_complement) {
                        if (!moldata.isPairExcluded(i, j)) {
                            particle2particle(pair_accumulator, group[i], group[j]);
                        }
                    }
                }
                // moved <-> moved
                for (auto i_it = index.begin(); i_it < index.end(); ++i_it) {
                    for (auto j_it = std::next(i_it); j_it < index.end(); ++j_it) {
                        if (!moldata.isPairExcluded(*i_it, *j_it)) {
                            particle2particle(pair_accumulator, group[*i_it], group[*j_it]);
                        }
                    }
                }
            }
        }
    }

    /**
     * @brief Complete cartesian pairing of particles in two groups.
     *
     * group1 × group2
     *
     * If the distance between the groups is greater or equal to the group cutoff distance, no calculation is performed.
     * The group intersection must be an empty set, i.e., no particle is included in both groups. This is not verified
     * for performance reason.
     *
     * @tparam TAccumulator an accumulator with '+=' operator overloaded to add a pair of particles as references
     * {T&, T&}
     * @tparam TGroup
     * @param pair_accumulator accumulator of interacting pairs of particles
     * @param group1
     * @param group2
     */
    template <typename TAccumulator, typename TGroup>
    void group2group(TAccumulator &pair_accumulator, const TGroup &group1, const TGroup &group2) {
        if (!cut(group1, group2)) {
            for (auto &particle1 : group1) {
                for (auto &particle2 : group2) {
                    particle2particle(pair_accumulator, particle1, particle2);
                }
            }
        }
    }

    /**
     * @brief Cross pairing of particles in two groups. Only a cartesian subset of the complete cartesian product is
     * considered as the particles in the first group must be also present in the index. The aim is to capture only
     * interactions that involve changing (indexed) particles.
     *
     * ⊕group1 × group2, where ⊕ denotes a filter by an index
     *
     * If the distance between the groups is greater or equal to the group cutoff distance, no calculation is performed.
     * The group intersection must be an empty set, i.e., no particle is included in both groups. This is not verified
     * for performance reason.
     * @tparam TAccumulator an accumulator with '+=' operator overloaded to add a pair of particles as references
     * {T&, T&}
     * @tparam TGroup
     * @param pair_accumulator accumulator of interacting pairs of particles
     * @param group1
     * @param group2
     * @param index1 list of particle indices in group1 relative to the group beginning
     */
    template <typename TAccumulator, typename TGroup>
    void group2group(TAccumulator& pair_accumulator, const TGroup& group1, const TGroup& group2,
                     const std::vector<std::size_t>& index1) {
        if (!cut(group1, group2)) {
            for (auto particle1_ndx : index1) {
                for (auto &particle2 : group2) {
                    particle2particle(pair_accumulator, *(group1.begin() + particle1_ndx), particle2);
                }
            }
        }
    }

    /**
     * @brief Cross pairing of particles in two groups. Only a non-cartesian subset of the complete cartesian product
     * is considered as at least one particles in the pair must be also present in the respective index. The aim is
     * to capture only interactions that involve changing (indexed) particles, i.e., to avoid pairs containing only
     * non-indexed particles.
     *
     * (⊕group1 × ∁⊕group2) + (∁⊕group1 × ⊕group2) + (⊕group1 × ⊕group2) =
     * = group1 × group2 − (∁⊕group2 × ∁⊕group2), where ⊕ denotes a filter by an index and ∁ a complement
     *
     * If the distance between the groups is greater or equal to the group cutoff distance, no calculation is performed.
     * The group intersection must be an empty set, i.e., no particle is included in both groups. This is not verified
     * for performance reason.
     *
     * @tparam TAccumulator an accumulator with '+=' operator overloaded to add a pair of particles as references
     * {T&, T&}
     * @tparam TGroup
     * @param pair_accumulator accumulator of interacting pairs of particles
     * @param group1
     * @param group2
     * @param index1 list of particle indices in group1 relative to the group beginning
     * @param index2 list of particle indices in group2 relative to the group beginning
     */
    template <typename TAccumulator, typename TGroup>
    void group2group(TAccumulator& pair_accumulator, const TGroup& group1, const TGroup& group2,
                     const std::vector<std::size_t>& index1, const std::vector<std::size_t>& index2) {
        if (!cut(group1, group2)) {
            if (!index2.empty()) {
                // (∁⊕group1 × ⊕group2) + (⊕group1 × ⊕group2) = group1 × ⊕group2
                group2group(pair_accumulator, group2, group1, index2);
                // + (⊕group1 × ∁⊕group2)
                auto index2_complement = indexComplement(group2.size(), index2);
                for (auto particle1_ndx : index1) {
                    for (auto particle2_ndx : index2_complement) {
                        particle2particle(pair_accumulator, group2[particle2_ndx], group1[particle1_ndx]);
                    }
                }
            } else if (!index1.empty()) {
                // (⊕group1 × ∁⊕group2) + (⊕group1 × ⊕group2) = ⊕group1 × group2
                group2group(pair_accumulator, group1, group2, index1);
                // + (∁⊕group1 × ⊕group2) = Ø as ⊕group2 is empty
            } else {
                // both indices empty hence nothing to do
            }
        }
    }

    /**
     * @brief Complete cartesian pairing between particles in a group and a union of groups.
     *
     * group × (∪ groups)
     *
     * If the distance between the groups is greater or equal to the group cutoff distance, the particle pairing between
     * them is skipped. The internal energy of the group is not computed even if the group is also present in the union
     * of groups.
     *
     * @tparam TAccumulator an accumulator with '+=' operator overloaded to add a pair of particles as references
     * {T&, T&}
     * @tparam TGroup
     * @tparam TGroups
     * @param pair_accumulator accumulator of interacting pairs of particles
     * @param group
     * @param groups
     */
    template <typename TAccumulator, typename TGroup, typename TGroups>
    void group2groups(TAccumulator &pair_accumulator, const TGroup &group, const TGroups &groups) {
        for (auto &other_group : groups) {
            if (&other_group != &group) {
                group2group(pair_accumulator, group, other_group);
            }
        }
    }

    /**
     * @brief Cross pairing of particles in a group and a union of groups. Only a cartesian subset of the complete
     * cartesian product is considered as the particles of the first group must be also present in the index. The aim
     * is to capture only interactions that involve changing (indexed) particles.
     *
     * ⊕group × (∪ groups), where ⊕ denotes a filter by an index
     *
     * If the distance between the groups is greater or equal to the group cutoff distance, the particle pairing
     * between them is skipped. The internal energy of the group is not computed even if the group is also present
     * in the union of groups.
     *
     * @tparam TAccumulator an accumulator with '+=' operator overloaded to add a pair of particles as references
     * {T&, T&}
     * @tparam TGroup
     * @tparam TGroups
     * @param pair_accumulator accumulator of interacting pairs of particles
     * @param group
     * @param group_index groups as indices in Space::groups
     * @param index list of particle indices in the group relative to the group beginning
     */
    template <typename TAccumulator, typename TGroup, typename TGroups>
    void group2groups(TAccumulator& pair_accumulator, const TGroup& group, const TGroups& group_index,
                      const std::vector<std::size_t>& index) {
        for (auto other_group_ndx : group_index) {
            const auto &other_group = spc.groups[other_group_ndx];
            if (&other_group != &group) {
                group2group(pair_accumulator, group, other_group, index);
            }
        }
    }

    /**
     * @brief Complete cartesian pairing between particles in a group and particles in other groups in space.
     *
     * group × (space ∖ group)
     *
     * If the distance between the groups is greater or equal to the group cutoff distance, the particle pairing
     * between them is skipped.
     *
     * @tparam TAccumulator an accumulator with '+=' operator overloaded to add a pair of particles as references
     * {T&, T&}
     * @tparam TGroup
     * @param pair_accumulator accumulator of interacting pairs of particles
     * @param group
     */
    template <typename TAccumulator, typename Tgroup>
    void group2all(TAccumulator &pair_accumulator, const Tgroup &group) {
        for (auto &other_group : spc.groups) {
            if (&other_group != &group) {
                group2group(pair_accumulator, group, other_group);
            }
        }
    }

    /**
     * @brief Complete cartesian pairing between a single particle in a group and particles in other groups in space.
     *
     * ⊕group × (space ∖ group), where ⊕ denotes a filter by an index (here a single particle)
     *
     * If the distance between the groups is greater or equal to the group cutoff distance, the particle pairing
     * between them is skipped. This method is performance-optimized version of the multiple indices method.
     *
     * @tparam TAccumulator an accumulator with '+=' operator overloaded to add a pair of particles as references
     * {T&, T&}
     * @tparam TGroup
     * @param pair_accumulator accumulator of interacting pairs of particles
     * @param group
     * @param index a particle index relative to the group beginning
     */
    template <typename TAccumulator, typename TGroup>
    void group2all(TAccumulator &pair_accumulator, const TGroup &group, const int index) {
        const auto &particle = group[index];
        for (auto &other_group : spc.groups) {
            if (&other_group != &group) {          // avoid self-interaction
                if (!cut(other_group, group)) {    // check g2g cut-off
                    for (auto &other_particle : other_group) { // loop over particles in other group
                        particle2particle(pair_accumulator, particle, other_particle);
                    }
                }
            }
        }
    }

    /**
     * @brief Complete cartesian pairing between selected particles in a group and particles in other groups in space.
     *
     * ⊕group × (space ∖ group), where ⊕ denotes a filter by an index
     *
     * If the distance between the groups is greater or equal to the group cutoff distance, the particle pairing
     * between them is skipped.
     *
     * @tparam TAccumulator an accumulator with '+=' operator overloaded to add a pair of particles as references
     * {T&, T&}
     * @param pair_accumulator accumulator of interacting pairs of particles
     * @param group
     * @param index list of particle indices in the group relative to the group beginning
     */
    template <typename TAccumulator, typename Tgroup>
    void group2all(TAccumulator& pair_accumulator, const Tgroup& group, const std::vector<std::size_t>& index) {
        if (index.size() == 1) {
            group2all(pair_accumulator, group, index[0]);
        } else {
            for (auto &other_group : spc.groups) {
                if (&other_group != &group) {
                    group2group(pair_accumulator, group, other_group, index);
                }
            }
        }
    }

    /**
     * @brief Cross pairing of particles among a union of groups. No internal pairs within any group are considered.
     *
     * If the distance between any two groups is greater or equal to the group cutoff distance, the particle pairing
     * between them is skipped.
     *
     * @tparam TAccumulator an accumulator with '+=' operator overloaded to add a pair of particles as references
     * {T&, T&}
     * @tparam T
     * @param pair_accumulator accumulator of interacting pairs of particles
     * @param group_index list of groups
     */
    template <typename TAccumulator, typename T>
    void groups2self(TAccumulator &pair_accumulator, const T &group_index) {
        for (auto group1_ndx_it = group_index.begin(); group1_ndx_it < group_index.end(); ++group1_ndx_it) {
            //no such move exists that the internal energy has to be recalculated
            //groupInternal(pair_accumulator, spc.groups[*group1_ndx_it]);
            for (auto group2_ndx_it = std::next(group1_ndx_it); group2_ndx_it < group_index.end(); group2_ndx_it++) {
                group2group(pair_accumulator, spc.groups[*group1_ndx_it], spc.groups[*group2_ndx_it]);
            }
        }
    }

    /**
     * @brief Cross pairing of particles between a union of groups and its complement in space.
     *
     * If the distance between any two groups is greater or equal to the group cutoff distance, the particle pairing
     * between them is skipped.
     *
     * @tparam TAccumulator an accumulator with '+=' operator overloaded to add a pair of particles as references
     * {T&, T&}
     * @tparam T
     * @param pair_accumulator accumulator of interacting pairs of particles
     * @param group_index list of groups
     */
    template <typename TAccumulator, typename T>
    void groups2all(TAccumulator &pair_accumulator, const T &group_index) {
        groups2self(pair_accumulator, group_index);
        auto index_complement = indexComplement(spc.groups.size(), group_index);
        for (auto group1_ndx : group_index) {
            for (auto group2_ndx : index_complement) {
                group2group(pair_accumulator, spc.groups[group1_ndx], spc.groups[group2_ndx]);
            }
        }
    }

    /**
     * @brief Cross pairing between all particles in the space.
     *
     * If the distance between particles' groups is greater or equal to the group cutoff distance, no calculation is
     * performed.
     *
     * @tparam TAccumulator an accumulator with '+=' operator overloaded to add a pair of particles as references
     * {T&, T&}
     * @param pair_accumulator accumulator of interacting pairs of particles
     */
    template <typename TAccumulator>
    void all(TAccumulator &pair_accumulator) {
        for (auto group_it = spc.groups.begin(); group_it < spc.groups.end(); ++group_it) {
            groupInternal(pair_accumulator, *group_it);
            for (auto other_group_it = std::next(group_it); other_group_it < spc.groups.end(); other_group_it++) {
                group2group(pair_accumulator, *group_it, *other_group_it);
            }
        }
    }

    /**
     * @brief Cross pairing between all particles in the space.
     *
     * If the distance between particles' groups is greater or equal to the group cutoff distance, no calculation is
     * performed.
     *
     * @tparam TAccumulator an accumulator with '+=' operator overloaded to add a pair of particles as references
     * {T&, T&}
     * @tparam TCondition a function returning bool and having a group as an argument
     * @param pair_accumulator accumulator of interacting pairs of particles
     * @param condition a group filter if internal energy of the group shall be added
     */
    template <typename TAccumulator, typename TCondition>
    void all(TAccumulator &pair_accumulator, TCondition condition) {
        for (auto group_it = spc.groups.begin(); group_it < spc.groups.end(); ++group_it) {
            if (condition(*group_it)) {
                groupInternal(pair_accumulator, *group_it);
            }
            for (auto other_group_it = std::next(group_it); other_group_it < spc.groups.end(); other_group_it++) {
                group2group(pair_accumulator, *group_it, *other_group_it);
            }
        }
    }
};
/**
 * @brief Computes pair quantity difference for a system perturbation. Such a quantity can be energy using a
 * non-bonded pair potential.
 *
 * @tparam TPolicy a pairing policy
 */
template <typename TPolicy>
class GroupPairing {
const Space &spc;
TPolicy pairing;
protected:
/**
* @brief Computes pair quantity difference if only a single group has changed.
*
* @tparam TAccumulator
* @param pair_accumulator accumulator of interacting pairs of particles
* @param change
*/
template <typename TAccumulator>
void accumulateGroup(TAccumulator &pair_accumulator, const Change &change) {
const auto &change_data = change.groups.at(0);
const auto& group = spc.groups.at(change_data.group_index);
if (change_data.relative_atom_indices.size() == 1) {
// faster algorithm if only a single particle moves
pairing.group2all(pair_accumulator, group, change_data.relative_atom_indices[0]);
if (change_data.internal) {
pairing.groupInternal(pair_accumulator, group, change_data.relative_atom_indices[0]);
}
} else {
const bool change_all = change_data.relative_atom_indices.empty(); // all particles or only their subset?
if (change_all) {
pairing.group2all(pair_accumulator, group);
if (change_data.internal) {
pairing.groupInternal(pair_accumulator, group);
}
} else {
pairing.group2all(pair_accumulator, group, change_data.relative_atom_indices);
if (change_data.internal) {
pairing.groupInternal(pair_accumulator, group, change_data.relative_atom_indices);
}
}
}
}
/**
* @brief Computes pair quantity difference if the number of particles has changed.
*
* Particles have to be explicitly enumerated in the atom indices of the changed group. Implicit addition of atoms
* with a group is not supported yet. Note that we do not have to care about missing (removed) particles at all.
* They are taken into account in the original (old) space where they are present.
*
* @param pair_accumulator accumulator of interacting pairs of particles
* @param change
*/
template <typename TAccumulator>
void accumulateSpeciation(TAccumulator &pair_accumulator, const Change &change) {
assert(change.matter_change);
const auto &moved = change.touchedGroupIndex(); // index of moved groups
const auto& fixed =
indexComplement(spc.groups.size(), moved) | ranges::to<std::vector>; // index of static groups
auto filter_active = [](int size) { return ranges::views::filter([size](const auto i) { return i < size; }); };
// loop over all changed groups
for (auto change_group1_it = change.groups.begin(); change_group1_it < change.groups.end(); ++change_group1_it) {
auto& group1 = spc.groups.at(change_group1_it->group_index);
// filter only active particles
const auto index1 =
change_group1_it->relative_atom_indices | filter_active(group1.size()) | ranges::to<std::vector>;
if (!index1.empty()) {
// particles added into the group: compute (changed group) <-> (static group)
pairing.group2groups(pair_accumulator, group1, fixed, index1);
}
// loop over successor changed groups (hence avoid double counting group1×group2 and group2×group1)
for (auto change_group2_it = std::next(change_group1_it); change_group2_it < change.groups.end(); ++change_group2_it) {
auto& group2 = spc.groups.at(change_group2_it->group_index);
const auto index2 =
change_group2_it->relative_atom_indices | filter_active(group2.size()) | ranges::to<std::vector>;
if (!index1.empty() || !index2.empty()) {
// particles added into one or other group: compute (changed group) <-> (changed group)
pairing.group2group(pair_accumulator, group1, group2, index1, index2);
}
}
if (!index1.empty() && !molecules.at(group1.id).rigid) {
// compute internal energy in the changed group
if (change_group1_it->all) {
pairing.groupInternal(pair_accumulator, group1);
} else {
pairing.groupInternal(pair_accumulator, group1, index1);
};
}
}
}
public:
/**
* @brief Computes pair quantity difference from changed particles.
*
* The internal energy contribution, i.e., the contribution from the intra group interactions, is added
* only if a single group is changed or if all changed.
*
* @param change
* @param pair_accumulator accumulator of interacting pairs of particles
*/
template <typename TAccumulator>
void accumulate(TAccumulator &pair_accumulator, const Change &change) {
assert(std::is_sorted(change.groups.begin(), change.groups.end()));
if (change.everything) {
pairing.all(pair_accumulator);
} else if (change.volume_change) {
// sum all interaction energies except the internal energies of incompressible molecules
pairing.all(pair_accumulator, [](auto& group) { return group.isAtomic() || group.traits().compressible; });
} else if (!change.matter_change) {
if (change.groups.size() == 1) {
// if only a single group changes use faster algorithm and optionally add the internal energy
accumulateGroup(pair_accumulator, change);
} else {
// if multiple groups move, no internal energies are computed
const auto &moved = change.touchedGroupIndex(); // index of moved groups
pairing.groups2all(pair_accumulator, moved);
}
} else { // change.dN
accumulateSpeciation(pair_accumulator, change);
}
}
GroupPairing(Space &spc) : spc(spc), pairing(spc) {}
void from_json(const json &j) {
pairing.from_json(j);
}
void to_json(json &j) const {
pairing.to_json(j);
}
// FIXME a temporal fix for non-refactorized NonbondedCached
template <typename Accumulator>
void group2group(Accumulator& pair_accumulator, const Space::GroupType& group1, const Space::GroupType& group2) {
pairing.group2group(std::forward<Accumulator&>(pair_accumulator), std::forward<const Space::GroupType&>(group1),
std::forward<const Space::GroupType&>(group2));
}
};
/**
* @brief Computes change in the non-bonded energy, assuming pairwise additive energy terms.
*
* @tparam TPairEnergy a functor to compute non-bonded energy between two particles
* @tparam TPairingPolicy pairing policy to effectively sum up the pairwise additive non-bonded energy
*/
template <typename TPairEnergy, typename TPairingPolicy> class Nonbonded : public Energybase {
  protected:
    const Space& spc;        //!< space to operate on
    TPairEnergy pair_energy; //!< a functor to compute non-bonded energy between two particles, see PairEnergy
    TPairingPolicy pairing;  //!< pairing policy to effectively sum up the pairwise additive non-bonded energy
    std::shared_ptr<EnergyAccumulatorBase>
        energy_accumulator; //!< energy accumulator used for storing and summing pair-wise energies

  public:
    Nonbonded(const json& j, Space& spc, BasePointerVector<Energybase>& pot)
        : spc(spc), pair_energy(spc, pot), pairing(spc) {
        name = "nonbonded";
        from_json(j);
        energy_accumulator = createEnergyAccumulator(j, pair_energy, 0.0);
        energy_accumulator->reserve(spc.numParticles()); // attempt to reduce memory fragmentation
    }

    void from_json(const json &j) {
        pair_energy.from_json(j);
        pairing.from_json(j);
    }

    void to_json(json &j) const override {
        pair_energy.to_json(j);
        pairing.to_json(j);
        energy_accumulator->to_json(j);
    }

    double energy(Change& change) override {
        energy_accumulator->clear();
        // down-cast to avoid slow, virtual function calls; distinct names avoid shadowing in the else-if chain
        if (auto instant = std::dynamic_pointer_cast<InstantEnergyAccumulator<TPairEnergy>>(energy_accumulator)) {
            pairing.accumulate(*instant, change);
        } else if (auto delayed = std::dynamic_pointer_cast<DelayedEnergyAccumulator<TPairEnergy>>(energy_accumulator)) {
            pairing.accumulate(*delayed, change);
        } else {
            pairing.accumulate(*energy_accumulator, change);
        }
        return static_cast<double>(*energy_accumulator);
    }

    /**
     * @brief Calculates the force on all particles.
     *
     * @todo A stub. Change to reflect only active particle, see Space::activeParticles().
     */
    void force(std::vector<Point> &forces) override {
        // just a temporary hack; perhaps better to allow PairForce instead of the PairEnergy template
        assert(forces.size() == spc.particles.size() && "the forces size must match the particle size");
        // `i + 1 < size` (not `i < size - 1`) avoids unsigned underflow when the particle vector is empty
        for (size_t i = 0; i + 1 < spc.particles.size(); ++i) {
            for (size_t j = i + 1; j < spc.particles.size(); ++j) {
                const Point f = pair_energy.force(spc.particles[i], spc.particles[j]);
                forces[i] += f;
                forces[j] -= f;
            }
        }
    }
};
/**
* @brief Computes non-bonded energy contribution from changed particles. Cache group2group energy once calculated,
* until a new trial configuration is provided. Not for general use as only partially implemented!
*
* Original implementation, only refurbished. Generally suboptimal as only PairingPolicy::group2group method
* may be called.
* No internal energy is ever computed. Cannot deal with particle count changes. And other unmentioned constrains.
*
* @tparam TPairEnergy a functor to compute non-bonded energy between two particles
* @tparam TPairingPolicy pairing policy to effectively sum up the pairwise additive non-bonded energy
*/
template <typename TPairEnergy, typename TPairingPolicy>
class NonbondedCached : public Nonbonded<TPairEnergy, TPairingPolicy> {
typedef Nonbonded<TPairEnergy, TPairingPolicy> Base;
typedef InstantEnergyAccumulator<TPairEnergy> TAccumulator;
Eigen::MatrixXf energy_cache;
using Base::spc;
template <typename TGroup>
double g2g(const TGroup &g1, const TGroup &g2) {
int i = &g1 - spc.groups.data();
int j = &g2 - spc.groups.data();
if (j < i) {
std::swap(i, j);
}
if (Energybase::state == Energybase::MonteCarloState::TRIAL) { // if this is from the trial system
TAccumulator energy_accumulator(Base::pair_energy);
Base::pairing.group2group(energy_accumulator, g1, g2);
energy_cache(i, j) = static_cast<double>(energy_accumulator); // update the cache
}
return energy_cache(i, j); // return (cached) value
}
template <typename TGroup>
double g2g(const TGroup& g1, const TGroup& g2, [[maybe_unused]] const std::vector<std::size_t>& index) {
// index not implemented
return g2g(g1, g2);
}
public:
NonbondedCached(const json &j, Space &spc, BasePointerVector<Energybase> &pot) : Base(j, spc, pot) {
Base::name += "EM";
init();
}
/**
* @brief Cache pair interactions in matrix.
*/
void init() override {
const auto groups_size = spc.groups.size();
energy_cache.resize(groups_size, groups_size);
energy_cache.setZero();
TAccumulator u(Base::pair_energy);
for (auto i = 0; i < groups_size - 1; ++i) {
for (auto j = i + 1; j < groups_size; ++j) {
u = 0.0;
Base::pairing.group2group(u, spc.groups.at(i), spc.groups.at(j));
energy_cache(i, j) = static_cast<double>(u);
}
}
}
/**
 * @brief Sum nonbonded energy for the parts of the system affected by `change`.
 *
 * All pair energies MUST be obtained through g2g() so that the energy matrix
 * is updated (in the TRIAL state) or reused (otherwise).
 */
double energy(Change &change) override {
    // Only g2g may be called there to compute (and cache) energy!
    double energy_sum = 0.0;
    if (change) {
        if (change.everything || change.volume_change) {
            // Full system update: loop over all unique group pairs (i < j).
            for (auto i = spc.groups.begin(); i < spc.groups.end(); ++i) {
                for (auto j = std::next(i); j < Base::spc.groups.end(); ++j) {
                    energy_sum += g2g(*i, *j);
                }
            }
        } else {
            if (change.groups.size() == 1) { // if exactly ONE molecule is changed
                auto &d = change.groups[0];
                auto& g1 = spc.groups.at(d.group_index);
                // Pair the changed group with every other group (self excluded
                // by the address comparison).
                for (auto g2_it = spc.groups.begin(); g2_it < spc.groups.end(); ++g2_it) {
                    if (&g1 != &(*g2_it)) {
                        energy_sum += g2g(g1, *g2_it, d.relative_atom_indices);
                    }
                }
            } else { // many molecules are changed
                auto moved = change.touchedGroupIndex(); // index of moved groups
                // moved<->moved
                if (change.moved_to_moved_interactions) {
                    // Unique pairs among the moved groups only.
                    for (auto i = moved.begin(); i < moved.end(); ++i) {
                        for (auto j = std::next(i); j < moved.end(); ++j) {
                            energy_sum += g2g(spc.groups[*i], spc.groups[*j]);
                        }
                    }
                }
                // moved<->static
#if true
                // classic version
                auto fixed = indexComplement(spc.groups.size(), moved); // index of static groups
                for (auto i : moved) {
                    for (auto j : fixed) {
                        energy_sum += g2g(spc.groups[i], spc.groups[j]);
                    }
                }
#else
                // OMP-ready version
                auto fixed =
                    indexComplement(spc.groups.size(), moved) | ranges::to<std::vector>; // index of static groups
                const size_t moved_size = moved.size();
                const size_t fixed_size = fixed.size();
                for (auto i = 0; i < moved_size; ++i) {
                    for (auto j = 0; j < fixed_size; ++j) {
                        energy_sum += g2g(spc.groups[moved[i]], spc.groups[fixed[j]]);
                    }
                }
#endif
            }
        }
        // more todo!
    }
    return energy_sum;
}
/**
* @brief Copy energy matrix from other
* @param base_ptr
* @param change
*/
/**
 * @brief Copy energy matrix from other
 * @param base_ptr Source instance; must be of the same dynamic type.
 * @param change Describes which groups changed and hence which rows/columns
 *        of the cache need to be copied.
 */
void sync(Energybase* base_ptr, const Change& change) override {
    auto other = dynamic_cast<decltype(this)>(base_ptr);
    assert(other);
    if (change.everything || change.volume_change) {
        // Whole-system change: copy the entire (strictly upper) cache.
        energy_cache.triangularView<Eigen::StrictlyUpper>() =
            (other->energy_cache).template triangularView<Eigen::StrictlyUpper>();
    } else {
        // Partial change: only the row and column touching each changed group
        // need syncing; the cache stores pairs (i, j) with i < j.
        for (auto &d : change.groups) {
            // NOTE(review): `int i` mixes with `d.group_index` here while the
            // second loop uses size_t — presumably group_index fits in int;
            // confirm its declared type.
            for (int i = 0; i < d.group_index; i++) {
                energy_cache(i, d.group_index) = other->energy_cache(i, d.group_index);
            }
            for (size_t i = d.group_index + 1; i < spc.groups.size(); i++) {
                energy_cache(d.group_index, i) = other->energy_cache(d.group_index, i);
            }
        }
    }
}
};
#ifdef ENABLE_FREESASA
/**
* @brief Interface to the FreeSASA C-library. Experimental and unoptimized.
* https://freesasa.github.io/
*
* @todo - Implement partial evaluation refelcting `change` object
* - Average volume currently mixes accepted/rejected states
*/
class FreeSASAEnergy : public Energybase {
  private:
    std::vector<double> positions; //!< Flattened position buffer for all particles
    std::vector<double> radii;     //!< Radii buffer for all particles
    std::vector<double> sasa;      //!< Target buffer for calculated surface areas

    const Space& spc;
    double cosolute_molarity = 0.;                       //!< co-solute concentration (mol/l)
    std::unique_ptr<freesasa_parameters_fwd> parameters; //!< Parameters for freesasa
    Average<double> mean_surface_area;

    void to_json(json &j) const override;
    void sync(Energybase* energybase_ptr, const Change& change) override;
    void updateSASA(const Change& change);
    void init() override;

    /**
     * @brief Copies radii from Space to internal buffer
     * @param begin Iterator to first particle
     * @param end Iterator to beyond last particle
     * @param change Change object (currently unused)
     */
    template <typename Tfirst, typename Tend>
    void updateRadii(Tfirst begin, Tend end, [[maybe_unused]] const Change& change) {
        const auto number_of_particles = std::distance(begin, end);
        radii.clear();
        radii.reserve(number_of_particles);
        // radius = sigma / 2 for each particle in [begin, end)
        std::transform(begin, end, std::back_inserter(radii),
                       [](const Particle& particle) { return particle.traits().sigma * 0.5; });
    }

    /**
     * @brief Copies positions from Space to internal (flattened) buffer
     * @param begin Iterator to first particle
     * @param end Iterator to beyond last particle
     * @param change Change object (currently unused)
     *
     * NOTE(review): `begin`/`end` are only used to size the reserve; the loop
     * below iterates spc.activeParticles() instead of [begin, end). Presumably
     * callers always pass the active-particle range — confirm, or iterate the
     * given range.
     */
    template <typename Tfirst, typename Tend>
    void updatePositions(Tfirst begin, Tend end, [[maybe_unused]] const Change& change) {
        const auto number_of_particles = std::distance(begin, end);
        positions.clear();
        positions.reserve(3 * number_of_particles);
        for (const auto& particle : spc.activeParticles()) {
            const auto* xyz = particle.pos.data();
            positions.insert(positions.end(), xyz, xyz + 3); // append x, y, z
        }
    }

  public:
    /**
     * @param spc
     * @param cosolute_molarity in particles per angstrom cubed
     * @param probe_radius in angstrom
     */
    FreeSASAEnergy(const Space& spc, double cosolute_molarity = 0.0, double probe_radius = 1.4);
    FreeSASAEnergy(const json& j, const Space& spc);
    double energy(Change& change) override;
    const std::vector<double>& getAreas() const { return sasa; } //!< Last calculated areas, one per particle
}; //!< SASA energy from transfer free energies
#endif
/**
* @brief class for calculating SASA energies calculating SASA of each particle every step
*
*/
class SASAEnergyBase : public Energybase {
  public:
    using index_type = size_t;
    std::vector<double> areas; //!< Target buffer for calculated surface areas

    Space& spc;
    double cosolute_molarity = 0.;          //!< co-solute concentration (mol/l)
    std::unique_ptr<SASA::SASABase> sasa;   //!< performs neighbour searching and subsequent sasa calculation

  private:
    void to_json(json& j) const override;
    void sync(Energybase* energybase_ptr, const Change& change) override;
    void init() override;

  protected:
    /**
     * @brief returns absolute index of particle in ParticleVector
     * @param particle must reference an element of spc.particles; computed by
     *        pointer arithmetic against the first element
     */
    inline auto indexOf(const Particle& particle) const {
        return static_cast<index_type>(std::addressof(particle) - std::addressof(spc.particles.at(0)));
    }

  public:
    SASAEnergyBase(Space& spc, double cosolute_molarity = 0.0, double probe_radius = 1.4, int slices_per_atom = 20,
                   bool dense_container = true);
    SASAEnergyBase(const json& j, Space& spc);
    const std::vector<double>& getAreas() const { return areas; } //!< Last calculated areas
    double energy(Change& change) override;
}; //!< SASA energy from transfer free energies with SASA calculation each step
/**
* @brief class for calculating SASA energies calculating SASA of particles based on change object every step
*
*/
class SASAEnergy : public SASAEnergyBase {
  private:
    std::vector<std::vector<index_type>>
        current_neighbours; //!< holds cached neighbour indices for each particle in ParticleVector
    std::vector<index_type> changed_indices; //!< particle indices whose SASA changed based on change object

    void to_json(json& j) const override;
    void sync(Energybase* energybase_ptr, const Change& change) override;
    void init() override;
    void updateChangedIndices(const Change& change); //!< rebuilds changed_indices from a Change object
    //! collects (into target_indices) the neighbours of `index` whose SASA is affected
    void insertChangedNeighboursOf(const index_type index, std::set<index_type>& target_indices) const;

  public:
    SASAEnergy(Space& spc, double cosolute_molarity = 0.0, double probe_radius = 1.4, int slices_per_atom = 20,
               bool dense_container = true);
    SASAEnergy(const json& j, Space& spc);
    double energy(Change& change) override;
}; //!< SASA energy from transfer free energies
/**
* @brief Oscillating energy on a single particle
*
* This is 2D version of the oscillating potential used
* to illustrate parallel tempering in the book
* "Understanding Molecular Simulation" by D. Frenkel.
*/
class Example2D : public Energybase {
  private:
    bool use_2d = true;        // Set to false to apply energy only along x (as by the book)
    double scale_energy = 1.0; // effective temperature
    const Point &particle;     // reference to 1st particle in the system
    void to_json(json &j) const override;

  public:
    Example2D(const json &j, Space &spc);
    double energy(Change &change) override;
};
/**
* @brief Aggregate and sum energy terms
*/
class Hamiltonian : public Energybase, public BasePointerVector<Energybase> {
  private:
    double maximum_allowed_energy = pc::infty; //!< Maximum allowed energy change
    std::vector<double> latest_energies;       //!< Placeholder for the latest energies for each energy term
    decltype(vec)& energy_terms;               //!< Alias for `vec`
    void addEwald(const json& j, Space& spc);  //!< Adds an instance of reciprocal space Ewald energies (if appropriate)
    void checkBondedMolecules() const;         //!< Warn if bonded molecules and no bonded energy term
    void to_json(json& j) const override;
    void force(PointVector& forces) override;
    //! Factory for a single named energy term from its json configuration
    std::shared_ptr<Energybase> createEnergy(Space& spc, const std::string& name, const json& j);

  public:
    Hamiltonian(Space& spc, const json& j);
    void init() override;
    void sync(Energybase* other_hamiltonian, const Change& change) override;
    double energy(Change& change) override;            //!< Energy due to changes
    const std::vector<double>& latestEnergies() const; //!< Energies for each term from the latest call to `energy()`
};
} // namespace Energy
} // namespace Faunus
|
dependences.c | // RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s
// REQUIRES: ompt
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7
#include "callback.h"
#include <omp.h>
#include <math.h>
#include <unistd.h>
/* OMPT test driver: creates two tasks with an out->in dependence on `x`
   so the runtime must report ompt_event_dependences for each task and an
   ompt_event_task_dependence_pair between them (verified by the CHECK
   lines below). Source comments are not seen by FileCheck. */
int main() {
  int x = 0;
#pragma omp parallel num_threads(2)
  {
#pragma omp master
    {
      print_ids(0);
      printf("%" PRIu64 ": address of x: %p\n", ompt_get_thread_data()->value,
             &x);
/* first task: writer (depend out) */
#pragma omp task depend(out : x)
      {
        x++;
        delay(100); /* keep the writer alive so the reader must wait on it */
      }
      print_fuzzy_address(1);
      print_ids(0);
/* second task: reader (depend in) — forms the dependence pair */
#pragma omp task depend(in : x)
      { x = -1; }
      print_ids(0);
    }
  }
  x++;
  return 0;
}
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_create'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_dependences'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_depende
// CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]]
// make sure initial data pointers are null
// CHECK-NOT: 0: new_task_data initially not null
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_implicit_task_begin:
// CHECK-SAME: parallel_id=[[PARALLEL_ID:[0-9]+]],
// CHECK-SAME: task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]],
// CHECK-SAME: task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT:0x[0-f]+]],
// CHECK-SAME: reenter_frame=[[NULL]]
// CHECK: {{^}}[[MASTER_ID]]: address of x: [[ADDRX:0x[0-f]+]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create:
// CHECK-SAME: parent_task_id={{[0-9]+}}, parent_task_frame.exit=[[EXIT]],
// CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}},
// CHECK-SAME: new_task_id=[[FIRST_TASK:[0-f]+]],
// CHECK-SAME: codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}},
// CHECK-SAME: task_type=ompt_task_explicit=4, has_dependences=yes
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_dependences:
// CHECK-SAME: task_id=[[FIRST_TASK]], deps=[([[ADDRX]],
// CHECK-SAME: ompt_dependence_type_inout)], ndeps=1
// CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
// CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]],
// CHECK-SAME: task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]],
// CHECK-SAME: reenter_frame=[[NULL]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create:
// CHECK-SAME: parent_task_id={{[0-9]+}}, parent_task_frame.exit=[[EXIT]],
// CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}},
// CHECK-SAME: new_task_id=[[SECOND_TASK:[0-f]+]], codeptr_ra={{0x[0-f]+}},
// CHECK-SAME: task_type=ompt_task_explicit=4, has_dependences=yes
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_dependences:
// CHECK-SAME: task_id=[[SECOND_TASK]], deps=[([[ADDRX]],
// CHECK-SAME: ompt_dependence_type_in)], ndeps=1
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_dependence_pair:
// CHECK-SAME: first_task_id=[[FIRST_TASK]], second_task_id=[[SECOND_TASK]]
// CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]],
// CHECK-SAME: task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]],
// CHECK-SAME: reenter_frame=[[NULL]]
|
rowwise_pick.h | /*!
* Copyright (c) 2020 by Contributors
* \file array/cpu/rowwise_pick.h
* \brief Template implementation for rowwise pick operators.
*/
#ifndef DGL_ARRAY_CPU_ROWWISE_PICK_H_
#define DGL_ARRAY_CPU_ROWWISE_PICK_H_
#include <dgl/array.h>
#include <functional>
namespace dgl {
namespace aten {
namespace impl {
// User-defined function for picking elements from one row.
//
// The column indices of the given row are stored in
// [col + off, col + off + len)
//
// Similarly, the data indices are stored in
// [data + off, data + off + len)
// Data index pointer could be NULL, which means data[i] == i
//
// *ATTENTION*: This function will be invoked concurrently. Please make sure
// it is thread-safe.
//
// \param rowid The row to pick from.
// \param off Starting offset of this row.
// \param len NNZ of the row.
// \param col Pointer of the column indices.
// \param data Pointer of the data indices.
// \param out_idx Picked indices in [off, off + len).
// Signature of the per-row picking callback (see the contract described above).
template <typename IdxType>
using PickFn = std::function<void(
    IdxType rowid, IdxType off, IdxType len,
    const IdxType* col, const IdxType* data,
    IdxType* out_idx)>;
// Template for picking non-zero values row-wise. The implementation utilizes
// OpenMP parallelization on rows because each row performs computation independently.
// Template for picking non-zero values row-wise. The implementation utilizes
// OpenMP parallelization on rows because each row performs computation independently.
//
// \param mat CSR matrix to pick from.
// \param rows Rows to pick from (absolute row ids into `mat`).
// \param num_picks Number of entries to pick per row.
// \param replace Whether picking is with replacement.
// \param pick_fn User callback that fills `num_picks` indices for one row.
// \return COO matrix of the picked entries.
template <typename IdxType>
COOMatrix CSRRowWisePick(CSRMatrix mat, IdArray rows,
                         int64_t num_picks, bool replace, PickFn<IdxType> pick_fn) {
  using namespace aten;
  const IdxType* indptr = static_cast<IdxType*>(mat.indptr->data);
  const IdxType* indices = static_cast<IdxType*>(mat.indices->data);
  const IdxType* data = CSRHasData(mat)? static_cast<IdxType*>(mat.data->data) : nullptr;
  const IdxType* rows_data = static_cast<IdxType*>(rows->data);
  const int64_t num_rows = rows->shape[0];
  const auto& ctx = mat.indptr->ctx;

  // To leverage OMP parallelization, we create two arrays to store
  // picked src and dst indices. Each array is of length num_rows * num_picks.
  // For rows whose nnz < num_picks, the indices are padded with -1.
  //
  // We check whether all the given rows
  // have at least num_picks number of nnz when replace is false.
  //
  // If the check holds, remove -1 elements by remove_if operation, which simply
  // moves valid elements to the head of arrays and create a view of the original
  // array. The implementation consumes a little extra memory than the actual requirement.
  //
  // Otherwise, directly use the row and col arrays to construct the result COO matrix.
  //
  // [02/29/2020 update]: OMP is disabled for now since batch-wise parallelism is more
  // significant. (minjie)
  IdArray picked_row = Full(-1, num_rows * num_picks, sizeof(IdxType) * 8, ctx);
  IdArray picked_col = Full(-1, num_rows * num_picks, sizeof(IdxType) * 8, ctx);
  IdArray picked_idx = Full(-1, num_rows * num_picks, sizeof(IdxType) * 8, ctx);
  IdxType* picked_rdata = static_cast<IdxType*>(picked_row->data);
  IdxType* picked_cdata = static_cast<IdxType*>(picked_col->data);
  IdxType* picked_idata = static_cast<IdxType*>(picked_idx->data);

  // all_has_fanout == true means no row produced -1 padding, so the
  // remove_if compaction below can be skipped entirely.
  bool all_has_fanout = true;
  if (replace) {
    all_has_fanout = true;
  } else {
#pragma omp parallel for reduction(&&:all_has_fanout)
    for (int64_t i = 0; i < num_rows; ++i) {
      const IdxType rid = rows_data[i];
      const IdxType len = indptr[rid + 1] - indptr[rid];
      all_has_fanout = all_has_fanout && (len >= num_picks);
    }
  }

#pragma omp parallel for
  for (int64_t i = 0; i < num_rows; ++i) {
    const IdxType rid = rows_data[i];
    CHECK_LT(rid, mat.num_rows);
    const IdxType off = indptr[rid];
    const IdxType len = indptr[rid + 1] - off;
    if (len <= num_picks && !replace) {
      // nnz <= num_picks and w/o replacement, take all nnz
      for (int64_t j = 0; j < len; ++j) {
        picked_rdata[i * num_picks + j] = rid;
        picked_cdata[i * num_picks + j] = indices[off + j];
        picked_idata[i * num_picks + j] = data? data[off + j] : off + j;
      }
    } else {
      // Delegate the choice of entries to the user callback; it writes
      // picked positions (in [off, off+len)) into picked_idata, which are
      // then resolved to row/col/data triplets.
      pick_fn(rid, off, len,
              indices, data,
              picked_idata + i * num_picks);
      for (int64_t j = 0; j < num_picks; ++j) {
        const IdxType picked = picked_idata[i * num_picks + j];
        picked_rdata[i * num_picks + j] = rid;
        picked_cdata[i * num_picks + j] = indices[picked];
        picked_idata[i * num_picks + j] = data? data[picked] : picked;
      }
    }
  }

  if (!all_has_fanout) {
    // correct the array by remove_if: compact away the -1 padding in place
    IdxType* new_row_end = std::remove_if(picked_rdata, picked_rdata + num_rows * num_picks,
                                          [] (IdxType i) { return i == -1; });
    IdxType* new_col_end = std::remove_if(picked_cdata, picked_cdata + num_rows * num_picks,
                                          [] (IdxType i) { return i == -1; });
    IdxType* new_idx_end = std::remove_if(picked_idata, picked_idata + num_rows * num_picks,
                                          [] (IdxType i) { return i == -1; });
    const int64_t new_len = (new_row_end - picked_rdata);
    // all three arrays are padded identically, so the compacted lengths must match
    CHECK_EQ(new_col_end - picked_cdata, new_len);
    CHECK_EQ(new_idx_end - picked_idata, new_len);
    picked_row = picked_row.CreateView({new_len}, picked_row->dtype);
    picked_col = picked_col.CreateView({new_len}, picked_col->dtype);
    picked_idx = picked_idx.CreateView({new_len}, picked_idx->dtype);
  }

  return COOMatrix(mat.num_rows, mat.num_cols,
                   picked_row, picked_col, picked_idx);
}
// Template for picking non-zero values row-wise. The implementation first slices
// out the corresponding rows and then converts it to CSR format. It then performs
// row-wise pick on the CSR matrix and rectifies the returned results.
// Row-wise pick on a COO matrix: slice out the requested rows, convert the
// slice to CSR, run the CSR picker on it, and finally map the slice-local row
// ids back to the ids of the original matrix.
template <typename IdxType>
COOMatrix COORowWisePick(COOMatrix mat, IdArray rows,
                         int64_t num_picks, bool replace, PickFn<IdxType> pick_fn) {
  using namespace aten;
  // CSR view of only the requested rows.
  const auto& sliced_csr = COOToCSR(COOSliceRows(mat, rows));
  // Inside the slice the rows are simply numbered 0..n-1.
  const IdArray local_rows = Range(0, rows->shape[0], rows->dtype.bits, rows->ctx);
  const auto& local_result = CSRRowWisePick<IdxType>(sliced_csr, local_rows, num_picks, replace, pick_fn);
  // Rectify: translate slice-local row indices back to the original ids.
  return COOMatrix(mat.num_rows, mat.num_cols,
                   IndexSelect(rows, local_result.row),
                   local_result.col,
                   local_result.data);
}
} // namespace impl
} // namespace aten
} // namespace dgl
#endif // DGL_ARRAY_CPU_ROWWISE_PICK_H_
|
GB_unaryop__minv_int8_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_int8_uint64
// op(A') function: GB_tran__minv_int8_uint64
// C type: int8_t
// A type: uint64_t
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = GB_IMINV_SIGNED (aij, 8)
// type of the A matrix entries
#define GB_ATYPE \
    uint64_t

// type of the C matrix entries
#define GB_CTYPE \
    int8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    uint64_t aij = Ax [pA]

// access the pC-th entry of C
#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_IMINV_SIGNED (x, 8) ;

// casting
#define GB_CASTING(z, x) \
    int8_t z = (int8_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */      \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;    \
    GB_OP (GB_CX (pC), x) ;  \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_INT8 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = minv ((int8_t) Ax [p]) for all p, split statically over nthreads.
// Returns GrB_NO_VALUE when this operator/type combination is compiled out.
GrB_Info GB_unop__minv_int8_uint64
(
    int8_t *restrict Cx,         // output array
    const uint64_t *restrict Ax, // input array
    int64_t anz,                 // number of entries
    int nthreads                 // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = minv ((int8_t) A'): transpose + typecast + apply, delegated to the
// shared template in GB_unaryop_transpose.c (phase 2 of 2).
GrB_Info GB_tran__minv_int8_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,            // per-slice row counts from phase 1
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice                     // number of slices of A
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
|
feature.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% FFFFF EEEEE AAA TTTTT U U RRRR EEEEE %
% F E A A T U U R R E %
% FFF EEE AAAAA T U U RRRR EEE %
% F E A A T U U R R E %
% F EEEEE A A T UUU R R EEEEE %
% %
% %
% MagickCore Image Feature Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/animate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/compress.h"
#include "MagickCore/constitute.h"
#include "MagickCore/display.h"
#include "MagickCore/draw.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/feature.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/list.h"
#include "MagickCore/image-private.h"
#include "MagickCore/magic.h"
#include "MagickCore/magick.h"
#include "MagickCore/matrix.h"
#include "MagickCore/memory_.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/morphology-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/timer.h"
#include "MagickCore/utility.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C a n n y E d g e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CannyEdgeImage() uses a multi-stage algorithm to detect a wide range of
% edges in images.
%
% The format of the CannyEdgeImage method is:
%
% Image *CannyEdgeImage(const Image *image,const double radius,
% const double sigma,const double lower_percent,
% const double upper_percent,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the gaussian smoothing filter.
%
% o sigma: the sigma of the gaussian smoothing filter.
%
% o lower_percent: percentage of edge pixels in the lower threshold.
%
% o upper_percent: percentage of edge pixels in the upper threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
typedef struct _CannyInfo
{
double
magnitude,
intensity;
int
orientation;
ssize_t
x,
y;
} CannyInfo;
static inline MagickBooleanType IsAuthenticPixel(const Image *image,
  const ssize_t x,const ssize_t y)
{
  /*
    A pixel is authentic when its coordinates lie inside the image bounds.
  */
  if ((x >= 0) && (x < (ssize_t) image->columns) &&
      (y >= 0) && (y < (ssize_t) image->rows))
    return(MagickTrue);
  return(MagickFalse);
}
/*
  Hysteresis edge tracing: starting from the seed pixel (x,y) — already above
  the upper threshold — mark it as an edge and grow the edge through all
  8-connected neighbors whose gradient intensity is at least lower_threshold.
  The canny_cache matrix row 0 is reused as an explicit worklist (stack) of
  pending pixels, with `i` as the stack size.
*/
static MagickBooleanType TraceEdges(Image *edge_image,CacheView *edge_view,
  MatrixInfo *canny_cache,const ssize_t x,const ssize_t y,
  const double lower_threshold,ExceptionInfo *exception)
{
  CannyInfo
    edge,
    pixel;

  MagickBooleanType
    status;

  register Quantum
    *q;

  register ssize_t
    i;

  /* mark the seed pixel as an edge */
  q=GetCacheViewAuthenticPixels(edge_view,x,y,1,1,exception);
  if (q == (Quantum *) NULL)
    return(MagickFalse);
  *q=QuantumRange;
  status=SyncCacheViewAuthenticPixels(edge_view,exception);
  if (status == MagickFalse)
    return(MagickFalse);
  /* push the seed onto the worklist */
  if (GetMatrixElement(canny_cache,0,0,&edge) == MagickFalse)
    return(MagickFalse);
  edge.x=x;
  edge.y=y;
  if (SetMatrixElement(canny_cache,0,0,&edge) == MagickFalse)
    return(MagickFalse);
  for (i=1; i != 0; )
  {
    ssize_t
      v;

    /* pop one pending pixel */
    i--;
    status=GetMatrixElement(canny_cache,i,0,&edge);
    if (status == MagickFalse)
      return(MagickFalse);
    for (v=(-1); v <= 1; v++)
    {
      ssize_t
        u;

      for (u=(-1); u <= 1; u++)
      {
        if ((u == 0) && (v == 0))
          continue;
        if (IsAuthenticPixel(edge_image,edge.x+u,edge.y+v) == MagickFalse)
          continue;
        /*
          Not an edge if gradient value is below the lower threshold.
        */
        q=GetCacheViewAuthenticPixels(edge_view,edge.x+u,edge.y+v,1,1,
          exception);
        if (q == (Quantum *) NULL)
          return(MagickFalse);
        status=GetMatrixElement(canny_cache,edge.x+u,edge.y+v,&pixel);
        if (status == MagickFalse)
          return(MagickFalse);
        if ((GetPixelIntensity(edge_image,q) == 0.0) &&
            (pixel.intensity >= lower_threshold))
          {
            /* unvisited pixel above the lower threshold: mark and push */
            *q=QuantumRange;
            status=SyncCacheViewAuthenticPixels(edge_view,exception);
            if (status == MagickFalse)
              return(MagickFalse);
            /* NOTE(review): mutating edge.x/edge.y here shifts the center of
               the remaining u/v neighbor scan to the newly pushed pixel —
               presumably intentional (it follows the edge greedily); confirm
               against upstream before restructuring. */
            edge.x+=u;
            edge.y+=v;
            status=SetMatrixElement(canny_cache,i,0,&edge);
            if (status == MagickFalse)
              return(MagickFalse);
            i++;
          }
      }
    }
  }
  return(MagickTrue);
}
MagickExport Image *CannyEdgeImage(const Image *image,const double radius,
  const double sigma,const double lower_percent,const double upper_percent,
  ExceptionInfo *exception)
{
#define CannyEdgeImageTag  "CannyEdge/Image"

  CacheView
    *edge_view;

  CannyInfo
    element;

  char
    geometry[MagickPathExtent];

  double
    lower_threshold,
    max,
    min,
    upper_threshold;

  Image
    *edge_image;

  KernelInfo
    *kernel_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MatrixInfo
    *canny_cache;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Filter out noise: Gaussian blur in both directions, then grayscale.
  */
  (void) FormatLocaleString(geometry,MagickPathExtent,
    "blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma);
  kernel_info=AcquireKernelInfo(geometry,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  edge_image=MorphologyImage(image,ConvolveMorphology,1,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  if (edge_image == (Image *) NULL)
    return((Image *) NULL);
  if (TransformImageColorspace(edge_image,GRAYColorspace,exception) == MagickFalse)
    {
      edge_image=DestroyImage(edge_image);
      return((Image *) NULL);
    }
  (void) SetImageAlphaChannel(edge_image,OffAlphaChannel,exception);
  /*
    Find the intensity gradient of the image.  One CannyInfo per pixel is
    stored in canny_cache (magnitude and quantized orientation here;
    intensity is filled in during non-maxima suppression below).
  */
  canny_cache=AcquireMatrixInfo(edge_image->columns,edge_image->rows,
    sizeof(CannyInfo),exception);
  if (canny_cache == (MatrixInfo *) NULL)
    {
      edge_image=DestroyImage(edge_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  edge_view=AcquireVirtualCacheView(edge_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(edge_image,edge_image,edge_image->rows,1)
#endif
  for (y=0; y < (ssize_t) edge_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* request one extra column and row so the 2x2 kernel never reads out of
       bounds */
    p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns+1,2,
      exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) edge_image->columns; x++)
    {
      CannyInfo
        pixel;

      double
        dx,
        dy;

      register const Quantum
        *magick_restrict kernel_pixels;

      ssize_t
        v;

      /* 2x2 finite-difference kernels for the horizontal (Gx) and vertical
         (Gy) gradient components */
      static double
        Gx[2][2] =
        {
          { -1.0,  +1.0 },
          { -1.0,  +1.0 }
        },
        Gy[2][2] =
        {
          { +1.0, +1.0 },
          { -1.0, -1.0 }
        };

      (void) memset(&pixel,0,sizeof(pixel));
      dx=0.0;
      dy=0.0;
      kernel_pixels=p;
      for (v=0; v < 2; v++)
      {
        ssize_t
          u;

        for (u=0; u < 2; u++)
        {
          double
            intensity;

          intensity=GetPixelIntensity(edge_image,kernel_pixels+u);
          dx+=0.5*Gx[v][u]*intensity;
          dy+=0.5*Gy[v][u]*intensity;
        }
        kernel_pixels+=edge_image->columns+1;
      }
      pixel.magnitude=hypot(dx,dy);
      /* quantize gradient direction into 4 bins (0/45/90/135 degrees) using
         slope thresholds at tan(22.5) and tan(67.5) */
      pixel.orientation=0;
      if (fabs(dx) > MagickEpsilon)
        {
          double
            slope;

          slope=dy/dx;
          if (slope < 0.0)
            {
              if (slope < -2.41421356237)
                pixel.orientation=0;
              else
                if (slope < -0.414213562373)
                  pixel.orientation=1;
                else
                  pixel.orientation=2;
            }
          else
            {
              if (slope > 2.41421356237)
                pixel.orientation=0;
              else
                if (slope > 0.414213562373)
                  pixel.orientation=3;
                else
                  pixel.orientation=2;
            }
        }
      if (SetMatrixElement(canny_cache,x,y,&pixel) == MagickFalse)
        continue;
      p+=GetPixelChannels(edge_image);
    }
  }
  edge_view=DestroyCacheView(edge_view);
  /*
    Non-maxima suppression, remove pixels that are not considered to be part
    of an edge.
  */
  progress=0;
  /* NOTE(review): element is read back before intensities are computed, so
     min/max start from the memset value 0.0 of pixel (0,0) — confirm this
     matches upstream intent. */
  (void) GetMatrixElement(canny_cache,0,0,&element);
  max=element.intensity;
  min=element.intensity;
  edge_view=AcquireAuthenticCacheView(edge_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(edge_image,edge_image,edge_image->rows,1)
#endif
  for (y=0; y < (ssize_t) edge_image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(edge_view,0,y,edge_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) edge_image->columns; x++)
    {
      CannyInfo
        alpha_pixel,
        beta_pixel,
        pixel;

      (void) GetMatrixElement(canny_cache,x,y,&pixel);
      /* compare against the two neighbors along the gradient direction */
      switch (pixel.orientation)
      {
        case 0:
        default:
        {
          /*
            0 degrees, north and south.
          */
          (void) GetMatrixElement(canny_cache,x,y-1,&alpha_pixel);
          (void) GetMatrixElement(canny_cache,x,y+1,&beta_pixel);
          break;
        }
        case 1:
        {
          /*
            45 degrees, northwest and southeast.
          */
          (void) GetMatrixElement(canny_cache,x-1,y-1,&alpha_pixel);
          (void) GetMatrixElement(canny_cache,x+1,y+1,&beta_pixel);
          break;
        }
        case 2:
        {
          /*
            90 degrees, east and west.
          */
          (void) GetMatrixElement(canny_cache,x-1,y,&alpha_pixel);
          (void) GetMatrixElement(canny_cache,x+1,y,&beta_pixel);
          break;
        }
        case 3:
        {
          /*
            135 degrees, northeast and southwest.
          */
          (void) GetMatrixElement(canny_cache,x+1,y-1,&beta_pixel);
          (void) GetMatrixElement(canny_cache,x-1,y+1,&alpha_pixel);
          break;
        }
      }
      /* keep the magnitude only if this pixel is a local maximum */
      pixel.intensity=pixel.magnitude;
      if ((pixel.magnitude < alpha_pixel.magnitude) ||
          (pixel.magnitude < beta_pixel.magnitude))
        pixel.intensity=0;
      (void) SetMatrixElement(canny_cache,x,y,&pixel);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp critical (MagickCore_CannyEdgeImage)
#endif
      {
        /* track global intensity range for the percentile thresholds */
        if (pixel.intensity < min)
          min=pixel.intensity;
        if (pixel.intensity > max)
          max=pixel.intensity;
      }
      *q=0;
      q+=GetPixelChannels(edge_image);
    }
    if (SyncCacheViewAuthenticPixels(edge_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  edge_view=DestroyCacheView(edge_view);
  /*
    Estimate hysteresis threshold.
  */
  lower_threshold=lower_percent*(max-min)+min;
  upper_threshold=upper_percent*(max-min)+min;
  /*
    Hysteresis threshold: seed at pixels above the upper threshold and trace
    connected edges down to the lower threshold (serial; TraceEdges mutates
    shared state).
  */
  edge_view=AcquireAuthenticCacheView(edge_image,exception);
  for (y=0; y < (ssize_t) edge_image->rows; y++)
  {
    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    for (x=0; x < (ssize_t) edge_image->columns; x++)
    {
      CannyInfo
        pixel;

      register const Quantum
        *magick_restrict p;

      /*
        Edge if pixel gradient higher than upper threshold.
      */
      p=GetCacheViewVirtualPixels(edge_view,x,y,1,1,exception);
      if (p == (const Quantum *) NULL)
        continue;
      status=GetMatrixElement(canny_cache,x,y,&pixel);
      if (status == MagickFalse)
        continue;
      if ((GetPixelIntensity(edge_image,p) == 0.0) &&
          (pixel.intensity >= upper_threshold))
        status=TraceEdges(edge_image,edge_view,canny_cache,x,y,lower_threshold,
          exception);
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,CannyEdgeImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  edge_view=DestroyCacheView(edge_view);
  /*
    Free resources.
  */
  canny_cache=DestroyMatrixInfo(canny_cache);
  return(edge_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e F e a t u r e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageFeatures() returns features for each channel in the image in
% each of four directions (horizontal, vertical, left and right diagonals)
% for the specified distance. The features include the angular second
% moment, contrast, correlation, sum of squares: variance, inverse difference
% moment, sum average, sum variance, sum entropy, entropy, difference
% variance, difference entropy, information measures of correlation 1, information
% measures of correlation 2, and maximum correlation coefficient. You can
% access the red channel contrast, for example, like this:
%
% channel_features=GetImageFeatures(image,1,exception);
% contrast=channel_features[RedPixelChannel].contrast[0];
%
% Use MagickRelinquishMemory() to free the features buffer.
%
% The format of the GetImageFeatures method is:
%
% ChannelFeatures *GetImageFeatures(const Image *image,
% const size_t distance,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o distance: the distance.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickLog10(const double x)
{
#define Log10Epsilon (1.0e-11)
if (fabs(x) < Log10Epsilon)
return(log10(Log10Epsilon));
return(log10(fabs(x)));
}
MagickExport ChannelFeatures *GetImageFeatures(const Image *image,
const size_t distance,ExceptionInfo *exception)
{
typedef struct _ChannelStatistics
{
PixelInfo
direction[4]; /* horizontal, vertical, left and right diagonals */
} ChannelStatistics;
CacheView
*image_view;
ChannelFeatures
*channel_features;
ChannelStatistics
**cooccurrence,
correlation,
*density_x,
*density_xy,
*density_y,
entropy_x,
entropy_xy,
entropy_xy1,
entropy_xy2,
entropy_y,
mean,
**Q,
*sum,
sum_squares,
variance;
PixelPacket
gray,
*grays;
MagickBooleanType
status;
register ssize_t
i,
r;
size_t
length;
unsigned int
number_grays;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if ((image->columns < (distance+1)) || (image->rows < (distance+1)))
return((ChannelFeatures *) NULL);
length=MaxPixelChannels+1UL;
channel_features=(ChannelFeatures *) AcquireQuantumMemory(length,
sizeof(*channel_features));
if (channel_features == (ChannelFeatures *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) memset(channel_features,0,length*
sizeof(*channel_features));
/*
Form grays.
*/
grays=(PixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*grays));
if (grays == (PixelPacket *) NULL)
{
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(channel_features);
}
for (i=0; i <= (ssize_t) MaxMap; i++)
{
grays[i].red=(~0U);
grays[i].green=(~0U);
grays[i].blue=(~0U);
grays[i].alpha=(~0U);
grays[i].black=(~0U);
}
status=MagickTrue;
image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (r=0; r < (ssize_t) image->rows; r++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,r,image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
grays[ScaleQuantumToMap(GetPixelRed(image,p))].red=
ScaleQuantumToMap(GetPixelRed(image,p));
grays[ScaleQuantumToMap(GetPixelGreen(image,p))].green=
ScaleQuantumToMap(GetPixelGreen(image,p));
grays[ScaleQuantumToMap(GetPixelBlue(image,p))].blue=
ScaleQuantumToMap(GetPixelBlue(image,p));
if (image->colorspace == CMYKColorspace)
grays[ScaleQuantumToMap(GetPixelBlack(image,p))].black=
ScaleQuantumToMap(GetPixelBlack(image,p));
if (image->alpha_trait != UndefinedPixelTrait)
grays[ScaleQuantumToMap(GetPixelAlpha(image,p))].alpha=
ScaleQuantumToMap(GetPixelAlpha(image,p));
p+=GetPixelChannels(image);
}
}
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
{
grays=(PixelPacket *) RelinquishMagickMemory(grays);
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
return(channel_features);
}
(void) memset(&gray,0,sizeof(gray));
for (i=0; i <= (ssize_t) MaxMap; i++)
{
if (grays[i].red != ~0U)
grays[gray.red++].red=grays[i].red;
if (grays[i].green != ~0U)
grays[gray.green++].green=grays[i].green;
if (grays[i].blue != ~0U)
grays[gray.blue++].blue=grays[i].blue;
if (image->colorspace == CMYKColorspace)
if (grays[i].black != ~0U)
grays[gray.black++].black=grays[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
if (grays[i].alpha != ~0U)
grays[gray.alpha++].alpha=grays[i].alpha;
}
/*
Allocate spatial dependence matrix.
*/
number_grays=gray.red;
if (gray.green > number_grays)
number_grays=gray.green;
if (gray.blue > number_grays)
number_grays=gray.blue;
if (image->colorspace == CMYKColorspace)
if (gray.black > number_grays)
number_grays=gray.black;
if (image->alpha_trait != UndefinedPixelTrait)
if (gray.alpha > number_grays)
number_grays=gray.alpha;
cooccurrence=(ChannelStatistics **) AcquireQuantumMemory(number_grays,
sizeof(*cooccurrence));
density_x=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1),
sizeof(*density_x));
density_xy=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1),
sizeof(*density_xy));
density_y=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1),
sizeof(*density_y));
Q=(ChannelStatistics **) AcquireQuantumMemory(number_grays,sizeof(*Q));
sum=(ChannelStatistics *) AcquireQuantumMemory(number_grays,sizeof(*sum));
if ((cooccurrence == (ChannelStatistics **) NULL) ||
(density_x == (ChannelStatistics *) NULL) ||
(density_xy == (ChannelStatistics *) NULL) ||
(density_y == (ChannelStatistics *) NULL) ||
(Q == (ChannelStatistics **) NULL) ||
(sum == (ChannelStatistics *) NULL))
{
if (Q != (ChannelStatistics **) NULL)
{
for (i=0; i < (ssize_t) number_grays; i++)
Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]);
Q=(ChannelStatistics **) RelinquishMagickMemory(Q);
}
if (sum != (ChannelStatistics *) NULL)
sum=(ChannelStatistics *) RelinquishMagickMemory(sum);
if (density_y != (ChannelStatistics *) NULL)
density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y);
if (density_xy != (ChannelStatistics *) NULL)
density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy);
if (density_x != (ChannelStatistics *) NULL)
density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x);
if (cooccurrence != (ChannelStatistics **) NULL)
{
for (i=0; i < (ssize_t) number_grays; i++)
cooccurrence[i]=(ChannelStatistics *)
RelinquishMagickMemory(cooccurrence[i]);
cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(
cooccurrence);
}
grays=(PixelPacket *) RelinquishMagickMemory(grays);
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(channel_features);
}
(void) memset(&correlation,0,sizeof(correlation));
(void) memset(density_x,0,2*(number_grays+1)*sizeof(*density_x));
(void) memset(density_xy,0,2*(number_grays+1)*sizeof(*density_xy));
(void) memset(density_y,0,2*(number_grays+1)*sizeof(*density_y));
(void) memset(&mean,0,sizeof(mean));
(void) memset(sum,0,number_grays*sizeof(*sum));
(void) memset(&sum_squares,0,sizeof(sum_squares));
(void) memset(density_xy,0,2*number_grays*sizeof(*density_xy));
(void) memset(&entropy_x,0,sizeof(entropy_x));
(void) memset(&entropy_xy,0,sizeof(entropy_xy));
(void) memset(&entropy_xy1,0,sizeof(entropy_xy1));
(void) memset(&entropy_xy2,0,sizeof(entropy_xy2));
(void) memset(&entropy_y,0,sizeof(entropy_y));
(void) memset(&variance,0,sizeof(variance));
for (i=0; i < (ssize_t) number_grays; i++)
{
cooccurrence[i]=(ChannelStatistics *) AcquireQuantumMemory(number_grays,
sizeof(**cooccurrence));
Q[i]=(ChannelStatistics *) AcquireQuantumMemory(number_grays,sizeof(**Q));
if ((cooccurrence[i] == (ChannelStatistics *) NULL) ||
(Q[i] == (ChannelStatistics *) NULL))
break;
(void) memset(cooccurrence[i],0,number_grays*
sizeof(**cooccurrence));
(void) memset(Q[i],0,number_grays*sizeof(**Q));
}
if (i < (ssize_t) number_grays)
{
for (i--; i >= 0; i--)
{
if (Q[i] != (ChannelStatistics *) NULL)
Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]);
if (cooccurrence[i] != (ChannelStatistics *) NULL)
cooccurrence[i]=(ChannelStatistics *)
RelinquishMagickMemory(cooccurrence[i]);
}
Q=(ChannelStatistics **) RelinquishMagickMemory(Q);
cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence);
sum=(ChannelStatistics *) RelinquishMagickMemory(sum);
density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y);
density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy);
density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x);
grays=(PixelPacket *) RelinquishMagickMemory(grays);
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(channel_features);
}
/*
Initialize spatial dependence matrix.
*/
status=MagickTrue;
image_view=AcquireVirtualCacheView(image,exception);
for (r=0; r < (ssize_t) image->rows; r++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
ssize_t
offset,
u,
v;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,-(ssize_t) distance,r,image->columns+
2*distance,distance+2,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
p+=distance*GetPixelChannels(image);;
for (x=0; x < (ssize_t) image->columns; x++)
{
for (i=0; i < 4; i++)
{
switch (i)
{
case 0:
default:
{
/*
Horizontal adjacency.
*/
offset=(ssize_t) distance;
break;
}
case 1:
{
/*
Vertical adjacency.
*/
offset=(ssize_t) (image->columns+2*distance);
break;
}
case 2:
{
/*
Right diagonal adjacency.
*/
offset=(ssize_t) ((image->columns+2*distance)-distance);
break;
}
case 3:
{
/*
Left diagonal adjacency.
*/
offset=(ssize_t) ((image->columns+2*distance)+distance);
break;
}
}
u=0;
v=0;
while (grays[u].red != ScaleQuantumToMap(GetPixelRed(image,p)))
u++;
while (grays[v].red != ScaleQuantumToMap(GetPixelRed(image,p+offset*GetPixelChannels(image))))
v++;
cooccurrence[u][v].direction[i].red++;
cooccurrence[v][u].direction[i].red++;
u=0;
v=0;
while (grays[u].green != ScaleQuantumToMap(GetPixelGreen(image,p)))
u++;
while (grays[v].green != ScaleQuantumToMap(GetPixelGreen(image,p+offset*GetPixelChannels(image))))
v++;
cooccurrence[u][v].direction[i].green++;
cooccurrence[v][u].direction[i].green++;
u=0;
v=0;
while (grays[u].blue != ScaleQuantumToMap(GetPixelBlue(image,p)))
u++;
while (grays[v].blue != ScaleQuantumToMap(GetPixelBlue(image,p+offset*GetPixelChannels(image))))
v++;
cooccurrence[u][v].direction[i].blue++;
cooccurrence[v][u].direction[i].blue++;
if (image->colorspace == CMYKColorspace)
{
u=0;
v=0;
while (grays[u].black != ScaleQuantumToMap(GetPixelBlack(image,p)))
u++;
while (grays[v].black != ScaleQuantumToMap(GetPixelBlack(image,p+offset*GetPixelChannels(image))))
v++;
cooccurrence[u][v].direction[i].black++;
cooccurrence[v][u].direction[i].black++;
}
if (image->alpha_trait != UndefinedPixelTrait)
{
u=0;
v=0;
while (grays[u].alpha != ScaleQuantumToMap(GetPixelAlpha(image,p)))
u++;
while (grays[v].alpha != ScaleQuantumToMap(GetPixelAlpha(image,p+offset*GetPixelChannels(image))))
v++;
cooccurrence[u][v].direction[i].alpha++;
cooccurrence[v][u].direction[i].alpha++;
}
}
p+=GetPixelChannels(image);
}
}
grays=(PixelPacket *) RelinquishMagickMemory(grays);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
{
for (i=0; i < (ssize_t) number_grays; i++)
cooccurrence[i]=(ChannelStatistics *)
RelinquishMagickMemory(cooccurrence[i]);
cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence);
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(channel_features);
}
/*
Normalize spatial dependence matrix.
*/
for (i=0; i < 4; i++)
{
double
normalize;
register ssize_t
y;
switch (i)
{
case 0:
default:
{
/*
Horizontal adjacency.
*/
normalize=2.0*image->rows*(image->columns-distance);
break;
}
case 1:
{
/*
Vertical adjacency.
*/
normalize=2.0*(image->rows-distance)*image->columns;
break;
}
case 2:
{
/*
Right diagonal adjacency.
*/
normalize=2.0*(image->rows-distance)*(image->columns-distance);
break;
}
case 3:
{
/*
Left diagonal adjacency.
*/
normalize=2.0*(image->rows-distance)*(image->columns-distance);
break;
}
}
normalize=PerceptibleReciprocal(normalize);
for (y=0; y < (ssize_t) number_grays; y++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
cooccurrence[x][y].direction[i].red*=normalize;
cooccurrence[x][y].direction[i].green*=normalize;
cooccurrence[x][y].direction[i].blue*=normalize;
if (image->colorspace == CMYKColorspace)
cooccurrence[x][y].direction[i].black*=normalize;
if (image->alpha_trait != UndefinedPixelTrait)
cooccurrence[x][y].direction[i].alpha*=normalize;
}
}
}
/*
Compute texture features.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,number_grays,1)
#endif
for (i=0; i < 4; i++)
{
register ssize_t
y;
for (y=0; y < (ssize_t) number_grays; y++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
/*
Angular second moment: measure of homogeneity of the image.
*/
channel_features[RedPixelChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].red*
cooccurrence[x][y].direction[i].red;
channel_features[GreenPixelChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].green*
cooccurrence[x][y].direction[i].green;
channel_features[BluePixelChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].blue*
cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].black*
cooccurrence[x][y].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].alpha*
cooccurrence[x][y].direction[i].alpha;
/*
Correlation: measure of linear-dependencies in the image.
*/
sum[y].direction[i].red+=cooccurrence[x][y].direction[i].red;
sum[y].direction[i].green+=cooccurrence[x][y].direction[i].green;
sum[y].direction[i].blue+=cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
sum[y].direction[i].black+=cooccurrence[x][y].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
sum[y].direction[i].alpha+=cooccurrence[x][y].direction[i].alpha;
correlation.direction[i].red+=x*y*cooccurrence[x][y].direction[i].red;
correlation.direction[i].green+=x*y*
cooccurrence[x][y].direction[i].green;
correlation.direction[i].blue+=x*y*
cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
correlation.direction[i].black+=x*y*
cooccurrence[x][y].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
correlation.direction[i].alpha+=x*y*
cooccurrence[x][y].direction[i].alpha;
/*
Inverse Difference Moment.
*/
channel_features[RedPixelChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].red/((y-x)*(y-x)+1);
channel_features[GreenPixelChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].green/((y-x)*(y-x)+1);
channel_features[BluePixelChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].blue/((y-x)*(y-x)+1);
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].black/((y-x)*(y-x)+1);
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].alpha/((y-x)*(y-x)+1);
/*
Sum average.
*/
density_xy[y+x+2].direction[i].red+=
cooccurrence[x][y].direction[i].red;
density_xy[y+x+2].direction[i].green+=
cooccurrence[x][y].direction[i].green;
density_xy[y+x+2].direction[i].blue+=
cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
density_xy[y+x+2].direction[i].black+=
cooccurrence[x][y].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
density_xy[y+x+2].direction[i].alpha+=
cooccurrence[x][y].direction[i].alpha;
/*
Entropy.
*/
channel_features[RedPixelChannel].entropy[i]-=
cooccurrence[x][y].direction[i].red*
MagickLog10(cooccurrence[x][y].direction[i].red);
channel_features[GreenPixelChannel].entropy[i]-=
cooccurrence[x][y].direction[i].green*
MagickLog10(cooccurrence[x][y].direction[i].green);
channel_features[BluePixelChannel].entropy[i]-=
cooccurrence[x][y].direction[i].blue*
MagickLog10(cooccurrence[x][y].direction[i].blue);
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].entropy[i]-=
cooccurrence[x][y].direction[i].black*
MagickLog10(cooccurrence[x][y].direction[i].black);
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].entropy[i]-=
cooccurrence[x][y].direction[i].alpha*
MagickLog10(cooccurrence[x][y].direction[i].alpha);
/*
Information Measures of Correlation.
*/
density_x[x].direction[i].red+=cooccurrence[x][y].direction[i].red;
density_x[x].direction[i].green+=cooccurrence[x][y].direction[i].green;
density_x[x].direction[i].blue+=cooccurrence[x][y].direction[i].blue;
if (image->alpha_trait != UndefinedPixelTrait)
density_x[x].direction[i].alpha+=
cooccurrence[x][y].direction[i].alpha;
if (image->colorspace == CMYKColorspace)
density_x[x].direction[i].black+=
cooccurrence[x][y].direction[i].black;
density_y[y].direction[i].red+=cooccurrence[x][y].direction[i].red;
density_y[y].direction[i].green+=cooccurrence[x][y].direction[i].green;
density_y[y].direction[i].blue+=cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
density_y[y].direction[i].black+=
cooccurrence[x][y].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
density_y[y].direction[i].alpha+=
cooccurrence[x][y].direction[i].alpha;
}
mean.direction[i].red+=y*sum[y].direction[i].red;
sum_squares.direction[i].red+=y*y*sum[y].direction[i].red;
mean.direction[i].green+=y*sum[y].direction[i].green;
sum_squares.direction[i].green+=y*y*sum[y].direction[i].green;
mean.direction[i].blue+=y*sum[y].direction[i].blue;
sum_squares.direction[i].blue+=y*y*sum[y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
{
mean.direction[i].black+=y*sum[y].direction[i].black;
sum_squares.direction[i].black+=y*y*sum[y].direction[i].black;
}
if (image->alpha_trait != UndefinedPixelTrait)
{
mean.direction[i].alpha+=y*sum[y].direction[i].alpha;
sum_squares.direction[i].alpha+=y*y*sum[y].direction[i].alpha;
}
}
/*
Correlation: measure of linear-dependencies in the image.
*/
channel_features[RedPixelChannel].correlation[i]=
(correlation.direction[i].red-mean.direction[i].red*
mean.direction[i].red)/(sqrt(sum_squares.direction[i].red-
(mean.direction[i].red*mean.direction[i].red))*sqrt(
sum_squares.direction[i].red-(mean.direction[i].red*
mean.direction[i].red)));
channel_features[GreenPixelChannel].correlation[i]=
(correlation.direction[i].green-mean.direction[i].green*
mean.direction[i].green)/(sqrt(sum_squares.direction[i].green-
(mean.direction[i].green*mean.direction[i].green))*sqrt(
sum_squares.direction[i].green-(mean.direction[i].green*
mean.direction[i].green)));
channel_features[BluePixelChannel].correlation[i]=
(correlation.direction[i].blue-mean.direction[i].blue*
mean.direction[i].blue)/(sqrt(sum_squares.direction[i].blue-
(mean.direction[i].blue*mean.direction[i].blue))*sqrt(
sum_squares.direction[i].blue-(mean.direction[i].blue*
mean.direction[i].blue)));
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].correlation[i]=
(correlation.direction[i].black-mean.direction[i].black*
mean.direction[i].black)/(sqrt(sum_squares.direction[i].black-
(mean.direction[i].black*mean.direction[i].black))*sqrt(
sum_squares.direction[i].black-(mean.direction[i].black*
mean.direction[i].black)));
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].correlation[i]=
(correlation.direction[i].alpha-mean.direction[i].alpha*
mean.direction[i].alpha)/(sqrt(sum_squares.direction[i].alpha-
(mean.direction[i].alpha*mean.direction[i].alpha))*sqrt(
sum_squares.direction[i].alpha-(mean.direction[i].alpha*
mean.direction[i].alpha)));
}
/*
Compute more texture features.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,number_grays,1)
#endif
for (i=0; i < 4; i++)
{
register ssize_t
x;
for (x=2; x < (ssize_t) (2*number_grays); x++)
{
/*
Sum average.
*/
channel_features[RedPixelChannel].sum_average[i]+=
x*density_xy[x].direction[i].red;
channel_features[GreenPixelChannel].sum_average[i]+=
x*density_xy[x].direction[i].green;
channel_features[BluePixelChannel].sum_average[i]+=
x*density_xy[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].sum_average[i]+=
x*density_xy[x].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].sum_average[i]+=
x*density_xy[x].direction[i].alpha;
/*
Sum entropy.
*/
channel_features[RedPixelChannel].sum_entropy[i]-=
density_xy[x].direction[i].red*
MagickLog10(density_xy[x].direction[i].red);
channel_features[GreenPixelChannel].sum_entropy[i]-=
density_xy[x].direction[i].green*
MagickLog10(density_xy[x].direction[i].green);
channel_features[BluePixelChannel].sum_entropy[i]-=
density_xy[x].direction[i].blue*
MagickLog10(density_xy[x].direction[i].blue);
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].sum_entropy[i]-=
density_xy[x].direction[i].black*
MagickLog10(density_xy[x].direction[i].black);
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].sum_entropy[i]-=
density_xy[x].direction[i].alpha*
MagickLog10(density_xy[x].direction[i].alpha);
/*
Sum variance.
*/
channel_features[RedPixelChannel].sum_variance[i]+=
(x-channel_features[RedPixelChannel].sum_entropy[i])*
(x-channel_features[RedPixelChannel].sum_entropy[i])*
density_xy[x].direction[i].red;
channel_features[GreenPixelChannel].sum_variance[i]+=
(x-channel_features[GreenPixelChannel].sum_entropy[i])*
(x-channel_features[GreenPixelChannel].sum_entropy[i])*
density_xy[x].direction[i].green;
channel_features[BluePixelChannel].sum_variance[i]+=
(x-channel_features[BluePixelChannel].sum_entropy[i])*
(x-channel_features[BluePixelChannel].sum_entropy[i])*
density_xy[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].sum_variance[i]+=
(x-channel_features[BlackPixelChannel].sum_entropy[i])*
(x-channel_features[BlackPixelChannel].sum_entropy[i])*
density_xy[x].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].sum_variance[i]+=
(x-channel_features[AlphaPixelChannel].sum_entropy[i])*
(x-channel_features[AlphaPixelChannel].sum_entropy[i])*
density_xy[x].direction[i].alpha;
}
}
/*
Compute more texture features.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,number_grays,1)
#endif
for (i=0; i < 4; i++)
{
register ssize_t
y;
for (y=0; y < (ssize_t) number_grays; y++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
/*
Sum of Squares: Variance
*/
variance.direction[i].red+=(y-mean.direction[i].red+1)*
(y-mean.direction[i].red+1)*cooccurrence[x][y].direction[i].red;
variance.direction[i].green+=(y-mean.direction[i].green+1)*
(y-mean.direction[i].green+1)*cooccurrence[x][y].direction[i].green;
variance.direction[i].blue+=(y-mean.direction[i].blue+1)*
(y-mean.direction[i].blue+1)*cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
variance.direction[i].black+=(y-mean.direction[i].black+1)*
(y-mean.direction[i].black+1)*cooccurrence[x][y].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
variance.direction[i].alpha+=(y-mean.direction[i].alpha+1)*
(y-mean.direction[i].alpha+1)*
cooccurrence[x][y].direction[i].alpha;
/*
Sum average / Difference Variance.
*/
density_xy[MagickAbsoluteValue(y-x)].direction[i].red+=
cooccurrence[x][y].direction[i].red;
density_xy[MagickAbsoluteValue(y-x)].direction[i].green+=
cooccurrence[x][y].direction[i].green;
density_xy[MagickAbsoluteValue(y-x)].direction[i].blue+=
cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
density_xy[MagickAbsoluteValue(y-x)].direction[i].black+=
cooccurrence[x][y].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
density_xy[MagickAbsoluteValue(y-x)].direction[i].alpha+=
cooccurrence[x][y].direction[i].alpha;
/*
Information Measures of Correlation.
*/
entropy_xy.direction[i].red-=cooccurrence[x][y].direction[i].red*
MagickLog10(cooccurrence[x][y].direction[i].red);
entropy_xy.direction[i].green-=cooccurrence[x][y].direction[i].green*
MagickLog10(cooccurrence[x][y].direction[i].green);
entropy_xy.direction[i].blue-=cooccurrence[x][y].direction[i].blue*
MagickLog10(cooccurrence[x][y].direction[i].blue);
if (image->colorspace == CMYKColorspace)
entropy_xy.direction[i].black-=cooccurrence[x][y].direction[i].black*
MagickLog10(cooccurrence[x][y].direction[i].black);
if (image->alpha_trait != UndefinedPixelTrait)
entropy_xy.direction[i].alpha-=
cooccurrence[x][y].direction[i].alpha*MagickLog10(
cooccurrence[x][y].direction[i].alpha);
entropy_xy1.direction[i].red-=(cooccurrence[x][y].direction[i].red*
MagickLog10(density_x[x].direction[i].red*density_y[y].direction[i].red));
entropy_xy1.direction[i].green-=(cooccurrence[x][y].direction[i].green*
MagickLog10(density_x[x].direction[i].green*
density_y[y].direction[i].green));
entropy_xy1.direction[i].blue-=(cooccurrence[x][y].direction[i].blue*
MagickLog10(density_x[x].direction[i].blue*density_y[y].direction[i].blue));
if (image->colorspace == CMYKColorspace)
entropy_xy1.direction[i].black-=(
cooccurrence[x][y].direction[i].black*MagickLog10(
density_x[x].direction[i].black*density_y[y].direction[i].black));
if (image->alpha_trait != UndefinedPixelTrait)
entropy_xy1.direction[i].alpha-=(
cooccurrence[x][y].direction[i].alpha*MagickLog10(
density_x[x].direction[i].alpha*density_y[y].direction[i].alpha));
entropy_xy2.direction[i].red-=(density_x[x].direction[i].red*
density_y[y].direction[i].red*MagickLog10(density_x[x].direction[i].red*
density_y[y].direction[i].red));
entropy_xy2.direction[i].green-=(density_x[x].direction[i].green*
density_y[y].direction[i].green*MagickLog10(density_x[x].direction[i].green*
density_y[y].direction[i].green));
entropy_xy2.direction[i].blue-=(density_x[x].direction[i].blue*
density_y[y].direction[i].blue*MagickLog10(density_x[x].direction[i].blue*
density_y[y].direction[i].blue));
if (image->colorspace == CMYKColorspace)
entropy_xy2.direction[i].black-=(density_x[x].direction[i].black*
density_y[y].direction[i].black*MagickLog10(
density_x[x].direction[i].black*density_y[y].direction[i].black));
if (image->alpha_trait != UndefinedPixelTrait)
entropy_xy2.direction[i].alpha-=(density_x[x].direction[i].alpha*
density_y[y].direction[i].alpha*MagickLog10(
density_x[x].direction[i].alpha*density_y[y].direction[i].alpha));
}
}
channel_features[RedPixelChannel].variance_sum_of_squares[i]=
variance.direction[i].red;
channel_features[GreenPixelChannel].variance_sum_of_squares[i]=
variance.direction[i].green;
channel_features[BluePixelChannel].variance_sum_of_squares[i]=
variance.direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].variance_sum_of_squares[i]=
variance.direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].variance_sum_of_squares[i]=
variance.direction[i].alpha;
}
/*
Compute more texture features.
*/
(void) memset(&variance,0,sizeof(variance));
(void) memset(&sum_squares,0,sizeof(sum_squares));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,number_grays,1)
#endif
for (i=0; i < 4; i++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
/*
Difference variance.
*/
variance.direction[i].red+=density_xy[x].direction[i].red;
variance.direction[i].green+=density_xy[x].direction[i].green;
variance.direction[i].blue+=density_xy[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
variance.direction[i].black+=density_xy[x].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
variance.direction[i].alpha+=density_xy[x].direction[i].alpha;
sum_squares.direction[i].red+=density_xy[x].direction[i].red*
density_xy[x].direction[i].red;
sum_squares.direction[i].green+=density_xy[x].direction[i].green*
density_xy[x].direction[i].green;
sum_squares.direction[i].blue+=density_xy[x].direction[i].blue*
density_xy[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
sum_squares.direction[i].black+=density_xy[x].direction[i].black*
density_xy[x].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
sum_squares.direction[i].alpha+=density_xy[x].direction[i].alpha*
density_xy[x].direction[i].alpha;
/*
Difference entropy.
*/
channel_features[RedPixelChannel].difference_entropy[i]-=
density_xy[x].direction[i].red*
MagickLog10(density_xy[x].direction[i].red);
channel_features[GreenPixelChannel].difference_entropy[i]-=
density_xy[x].direction[i].green*
MagickLog10(density_xy[x].direction[i].green);
channel_features[BluePixelChannel].difference_entropy[i]-=
density_xy[x].direction[i].blue*
MagickLog10(density_xy[x].direction[i].blue);
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].difference_entropy[i]-=
density_xy[x].direction[i].black*
MagickLog10(density_xy[x].direction[i].black);
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].difference_entropy[i]-=
density_xy[x].direction[i].alpha*
MagickLog10(density_xy[x].direction[i].alpha);
/*
Information Measures of Correlation.
*/
entropy_x.direction[i].red-=(density_x[x].direction[i].red*
MagickLog10(density_x[x].direction[i].red));
entropy_x.direction[i].green-=(density_x[x].direction[i].green*
MagickLog10(density_x[x].direction[i].green));
entropy_x.direction[i].blue-=(density_x[x].direction[i].blue*
MagickLog10(density_x[x].direction[i].blue));
if (image->colorspace == CMYKColorspace)
entropy_x.direction[i].black-=(density_x[x].direction[i].black*
MagickLog10(density_x[x].direction[i].black));
if (image->alpha_trait != UndefinedPixelTrait)
entropy_x.direction[i].alpha-=(density_x[x].direction[i].alpha*
MagickLog10(density_x[x].direction[i].alpha));
entropy_y.direction[i].red-=(density_y[x].direction[i].red*
MagickLog10(density_y[x].direction[i].red));
entropy_y.direction[i].green-=(density_y[x].direction[i].green*
MagickLog10(density_y[x].direction[i].green));
entropy_y.direction[i].blue-=(density_y[x].direction[i].blue*
MagickLog10(density_y[x].direction[i].blue));
if (image->colorspace == CMYKColorspace)
entropy_y.direction[i].black-=(density_y[x].direction[i].black*
MagickLog10(density_y[x].direction[i].black));
if (image->alpha_trait != UndefinedPixelTrait)
entropy_y.direction[i].alpha-=(density_y[x].direction[i].alpha*
MagickLog10(density_y[x].direction[i].alpha));
}
/*
Difference variance.
*/
channel_features[RedPixelChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].red)-
(variance.direction[i].red*variance.direction[i].red))/
((double) number_grays*number_grays*number_grays*number_grays);
channel_features[GreenPixelChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].green)-
(variance.direction[i].green*variance.direction[i].green))/
((double) number_grays*number_grays*number_grays*number_grays);
channel_features[BluePixelChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].blue)-
(variance.direction[i].blue*variance.direction[i].blue))/
((double) number_grays*number_grays*number_grays*number_grays);
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].black)-
(variance.direction[i].black*variance.direction[i].black))/
((double) number_grays*number_grays*number_grays*number_grays);
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].alpha)-
(variance.direction[i].alpha*variance.direction[i].alpha))/
((double) number_grays*number_grays*number_grays*number_grays);
/*
Information Measures of Correlation.
*/
channel_features[RedPixelChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].red-entropy_xy1.direction[i].red)/
(entropy_x.direction[i].red > entropy_y.direction[i].red ?
entropy_x.direction[i].red : entropy_y.direction[i].red);
channel_features[GreenPixelChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].green-entropy_xy1.direction[i].green)/
(entropy_x.direction[i].green > entropy_y.direction[i].green ?
entropy_x.direction[i].green : entropy_y.direction[i].green);
channel_features[BluePixelChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].blue-entropy_xy1.direction[i].blue)/
(entropy_x.direction[i].blue > entropy_y.direction[i].blue ?
entropy_x.direction[i].blue : entropy_y.direction[i].blue);
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].black-entropy_xy1.direction[i].black)/
(entropy_x.direction[i].black > entropy_y.direction[i].black ?
entropy_x.direction[i].black : entropy_y.direction[i].black);
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].alpha-entropy_xy1.direction[i].alpha)/
(entropy_x.direction[i].alpha > entropy_y.direction[i].alpha ?
entropy_x.direction[i].alpha : entropy_y.direction[i].alpha);
channel_features[RedPixelChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].red-
entropy_xy.direction[i].red)))));
channel_features[GreenPixelChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].green-
entropy_xy.direction[i].green)))));
channel_features[BluePixelChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].blue-
entropy_xy.direction[i].blue)))));
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].black-
entropy_xy.direction[i].black)))));
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].alpha-
entropy_xy.direction[i].alpha)))));
}
/*
Compute more texture features.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,number_grays,1)
#endif
for (i=0; i < 4; i++)
{
ssize_t
z;
for (z=0; z < (ssize_t) number_grays; z++)
{
register ssize_t
y;
ChannelStatistics
pixel;
(void) memset(&pixel,0,sizeof(pixel));
for (y=0; y < (ssize_t) number_grays; y++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
/*
Contrast: amount of local variations present in an image.
*/
if (((y-x) == z) || ((x-y) == z))
{
pixel.direction[i].red+=cooccurrence[x][y].direction[i].red;
pixel.direction[i].green+=cooccurrence[x][y].direction[i].green;
pixel.direction[i].blue+=cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
pixel.direction[i].black+=cooccurrence[x][y].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
pixel.direction[i].alpha+=
cooccurrence[x][y].direction[i].alpha;
}
/*
Maximum Correlation Coefficient.
*/
Q[z][y].direction[i].red+=cooccurrence[z][x].direction[i].red*
cooccurrence[y][x].direction[i].red/density_x[z].direction[i].red/
density_y[x].direction[i].red;
Q[z][y].direction[i].green+=cooccurrence[z][x].direction[i].green*
cooccurrence[y][x].direction[i].green/
density_x[z].direction[i].green/density_y[x].direction[i].red;
Q[z][y].direction[i].blue+=cooccurrence[z][x].direction[i].blue*
cooccurrence[y][x].direction[i].blue/density_x[z].direction[i].blue/
density_y[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
Q[z][y].direction[i].black+=cooccurrence[z][x].direction[i].black*
cooccurrence[y][x].direction[i].black/
density_x[z].direction[i].black/density_y[x].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
Q[z][y].direction[i].alpha+=
cooccurrence[z][x].direction[i].alpha*
cooccurrence[y][x].direction[i].alpha/
density_x[z].direction[i].alpha/
density_y[x].direction[i].alpha;
}
}
channel_features[RedPixelChannel].contrast[i]+=z*z*
pixel.direction[i].red;
channel_features[GreenPixelChannel].contrast[i]+=z*z*
pixel.direction[i].green;
channel_features[BluePixelChannel].contrast[i]+=z*z*
pixel.direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].contrast[i]+=z*z*
pixel.direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].contrast[i]+=z*z*
pixel.direction[i].alpha;
}
/*
Maximum Correlation Coefficient.
Future: return second largest eigenvalue of Q.
*/
channel_features[RedPixelChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
channel_features[GreenPixelChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
channel_features[BluePixelChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
}
/*
Relinquish resources.
*/
sum=(ChannelStatistics *) RelinquishMagickMemory(sum);
for (i=0; i < (ssize_t) number_grays; i++)
Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]);
Q=(ChannelStatistics **) RelinquishMagickMemory(Q);
density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y);
density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy);
density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x);
for (i=0; i < (ssize_t) number_grays; i++)
cooccurrence[i]=(ChannelStatistics *)
RelinquishMagickMemory(cooccurrence[i]);
cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence);
return(channel_features);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% H o u g h L i n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Use HoughLineImage() in conjunction with any binary edge extracted image (we
% recommend Canny) to identify lines in the image. The algorithm accumulates
% counts for every white pixel for every possible orientation (for angles from
% 0 to 179 in 1 degree increments) and distance from the center of the image to
% the corner (in 1 px increments) and stores the counts in an accumulator matrix
% of angle vs distance. The size of the accumulator is 180x(diagonal/2). Next
% it searches this space for peaks in counts and converts the locations of the
% peaks to slope and intercept in the normal x,y input image space. Use the
% slope/intercepts to find the endpoints clipped to the bounds of the image. The
% lines are then drawn. The counts are a measure of the length of the lines
%
% The format of the HoughLineImage method is:
%
% Image *HoughLineImage(const Image *image,const size_t width,
% const size_t height,const size_t threshold,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width, height: find line pairs as local maxima in this neighborhood.
%
% o threshold: the line count threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
/*
  RenderHoughLines() reads the MVG primitive file referenced by image_info,
  draws it onto a freshly allocated canvas of the given size, and returns the
  rendered image (or NULL on failure).

  Fixes: draw_info was leaked on the SetImageExtent and
  SetImageBackgroundColor failure paths, and DrawImage was invoked even when
  the primitive string could not be acquired.
*/
static Image *RenderHoughLines(const ImageInfo *image_info,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define BoundingBox  "viewbox"

  DrawInfo
    *draw_info;

  Image
    *image;

  MagickBooleanType
    status;

  /*
    Open image.
  */
  image=AcquireImage(image_info,exception);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  image->columns=columns;
  image->rows=rows;
  /*
    Scale the canvas to the image resolution (DefaultResolution otherwise).
  */
  draw_info=CloneDrawInfo(image_info,(DrawInfo *) NULL);
  draw_info->affine.sx=image->resolution.x == 0.0 ? 1.0 : image->resolution.x/
    DefaultResolution;
  draw_info->affine.sy=image->resolution.y == 0.0 ? 1.0 : image->resolution.y/
    DefaultResolution;
  image->columns=(size_t) (draw_info->affine.sx*image->columns);
  image->rows=(size_t) (draw_info->affine.sy*image->rows);
  status=SetImageExtent(image,image->columns,image->rows,exception);
  if (status == MagickFalse)
    {
      draw_info=DestroyDrawInfo(draw_info);  /* was leaked on this path */
      return(DestroyImageList(image));
    }
  if (SetImageBackgroundColor(image,exception) == MagickFalse)
    {
      draw_info=DestroyDrawInfo(draw_info);  /* was leaked on this path */
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Render drawing: take the primitive text either from the file on disk or
    from the already-buffered blob.
  */
  if (GetBlobStreamData(image) == (unsigned char *) NULL)
    draw_info->primitive=FileToString(image->filename,~0UL,exception);
  else
    {
      draw_info->primitive=(char *) AcquireMagickMemory((size_t)
        GetBlobSize(image)+1);
      if (draw_info->primitive != (char *) NULL)
        {
          (void) memcpy(draw_info->primitive,GetBlobStreamData(image),
            (size_t) GetBlobSize(image));
          draw_info->primitive[GetBlobSize(image)]='\0';
        }
    }
  if (draw_info->primitive != (char *) NULL)
    (void) DrawImage(image,draw_info,exception);
  draw_info=DestroyDrawInfo(draw_info);
  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}
/*
  HoughLineImage() identifies lines in a binary edge image: every white pixel
  votes in a 180 x (2*diagonal/2) angle-vs-distance accumulator, local maxima
  above the threshold are converted to clipped line segments, and the
  segments are rendered onto a new canvas.

  Fix: progress was reported under CannyEdgeImageTag; it now uses the
  HoughLineImageTag defined (and previously unused) in this function.
*/
MagickExport Image *HoughLineImage(const Image *image,const size_t width,
  const size_t height,const size_t threshold,ExceptionInfo *exception)
{
#define HoughLineImageTag  "HoughLine/Image"

  CacheView
    *image_view;

  char
    message[MagickPathExtent],
    path[MagickPathExtent];

  const char
    *artifact;

  double
    hough_height;

  Image
    *lines_image = NULL;

  ImageInfo
    *image_info;

  int
    file;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MatrixInfo
    *accumulator;

  PointInfo
    center;

  register ssize_t
    y;

  size_t
    accumulator_height,
    accumulator_width,
    line_count;

  /*
    Create the accumulator: 180 columns (1 degree resolution) by
    2*(diagonal/2) rows (1 pixel resolution).
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  accumulator_width=180;
  hough_height=((sqrt(2.0)*(double) (image->rows > image->columns ?
    image->rows : image->columns))/2.0);
  accumulator_height=(size_t) (2.0*hough_height);
  accumulator=AcquireMatrixInfo(accumulator_width,accumulator_height,
    sizeof(double),exception);
  if (accumulator == (MatrixInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  if (NullMatrix(accumulator) == MagickFalse)
    {
      accumulator=DestroyMatrixInfo(accumulator);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Populate the accumulator: each white pixel votes for every (angle,radius)
    line that could pass through it.
  */
  status=MagickTrue;
  progress=0;
  center.x=(double) image->columns/2.0;
  center.y=(double) image->rows/2.0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelIntensity(image,p) > (QuantumRange/2.0))
        {
          register ssize_t
            i;

          for (i=0; i < 180; i++)
          {
            double
              count,
              radius;

            radius=(((double) x-center.x)*cos(DegreesToRadians((double) i)))+
              (((double) y-center.y)*sin(DegreesToRadians((double) i)));
            (void) GetMatrixElement(accumulator,i,(ssize_t)
              MagickRound(radius+hough_height),&count);
            count++;
            (void) SetMatrixElement(accumulator,i,(ssize_t)
              MagickRound(radius+hough_height),&count);
          }
        }
      p+=GetPixelChannels(image);
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,HoughLineImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    {
      accumulator=DestroyMatrixInfo(accumulator);
      return((Image *) NULL);
    }
  /*
    Generate line segments from accumulator: write an MVG drawing into a
    unique temporary file, one "line" primitive per accepted peak.
  */
  file=AcquireUniqueFileResource(path);
  if (file == -1)
    {
      accumulator=DestroyMatrixInfo(accumulator);
      return((Image *) NULL);
    }
  (void) FormatLocaleString(message,MagickPathExtent,
    "# Hough line transform: %.20gx%.20g%+.20g\n",(double) width,
    (double) height,(double) threshold);
  if (write(file,message,strlen(message)) != (ssize_t) strlen(message))
    status=MagickFalse;
  (void) FormatLocaleString(message,MagickPathExtent,
    "viewbox 0 0 %.20g %.20g\n",(double) image->columns,(double) image->rows);
  if (write(file,message,strlen(message)) != (ssize_t) strlen(message))
    status=MagickFalse;
  (void) FormatLocaleString(message,MagickPathExtent,
    "# x1,y1 x2,y2 # count angle distance\n");
  if (write(file,message,strlen(message)) != (ssize_t) strlen(message))
    status=MagickFalse;
  line_count=image->columns > image->rows ? image->columns/4 : image->rows/4;
  if (threshold != 0)
    line_count=threshold;
  for (y=0; y < (ssize_t) accumulator_height; y++)
  {
    register ssize_t
      x;

    for (x=0; x < (ssize_t) accumulator_width; x++)
    {
      double
        count;

      (void) GetMatrixElement(accumulator,x,y,&count);
      if (count >= (double) line_count)
        {
          double
            maxima;

          SegmentInfo
            line;

          ssize_t
            v;

          /*
            Is point a local maxima within the width x height neighborhood?
          */
          maxima=count;
          for (v=(-((ssize_t) height/2)); v <= (((ssize_t) height/2)); v++)
          {
            ssize_t
              u;

            for (u=(-((ssize_t) width/2)); u <= (((ssize_t) width/2)); u++)
            {
              if ((u != 0) || (v != 0))
                {
                  (void) GetMatrixElement(accumulator,x+u,y+v,&count);
                  if (count > maxima)
                    {
                      maxima=count;
                      break;
                    }
                }
            }
            if (u < (ssize_t) (width/2))
              break;
          }
          (void) GetMatrixElement(accumulator,x,y,&count);
          if (maxima > count)
            continue;
          if ((x >= 45) && (x <= 135))
            {
              /*
                y = (r-x cos(t))/sin(t)
              */
              line.x1=0.0;
              line.y1=((double) (y-(accumulator_height/2.0))-((line.x1-
                (image->columns/2.0))*cos(DegreesToRadians((double) x))))/
                sin(DegreesToRadians((double) x))+(image->rows/2.0);
              line.x2=(double) image->columns;
              line.y2=((double) (y-(accumulator_height/2.0))-((line.x2-
                (image->columns/2.0))*cos(DegreesToRadians((double) x))))/
                sin(DegreesToRadians((double) x))+(image->rows/2.0);
            }
          else
            {
              /*
                x = (r-y cos(t))/sin(t)
              */
              line.y1=0.0;
              line.x1=((double) (y-(accumulator_height/2.0))-((line.y1-
                (image->rows/2.0))*sin(DegreesToRadians((double) x))))/
                cos(DegreesToRadians((double) x))+(image->columns/2.0);
              line.y2=(double) image->rows;
              line.x2=((double) (y-(accumulator_height/2.0))-((line.y2-
                (image->rows/2.0))*sin(DegreesToRadians((double) x))))/
                cos(DegreesToRadians((double) x))+(image->columns/2.0);
            }
          (void) FormatLocaleString(message,MagickPathExtent,
            "line %g,%g %g,%g # %g %g %g\n",line.x1,line.y1,line.x2,line.y2,
            maxima,(double) x,(double) y);
          if (write(file,message,strlen(message)) != (ssize_t) strlen(message))
            status=MagickFalse;
        }
    }
  }
  (void) close(file);
  /*
    Render lines to image canvas, honoring any draw-related artifacts.
  */
  image_info=AcquireImageInfo();
  image_info->background_color=image->background_color;
  (void) FormatLocaleString(image_info->filename,MagickPathExtent,"%s",path);
  artifact=GetImageArtifact(image,"background");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"background",artifact);
  artifact=GetImageArtifact(image,"fill");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"fill",artifact);
  artifact=GetImageArtifact(image,"stroke");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"stroke",artifact);
  artifact=GetImageArtifact(image,"strokewidth");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"strokewidth",artifact);
  lines_image=RenderHoughLines(image_info,image->columns,image->rows,exception);
  artifact=GetImageArtifact(image,"hough-lines:accumulator");
  if ((lines_image != (Image *) NULL) &&
      (IsStringTrue(artifact) != MagickFalse))
    {
      Image
        *accumulator_image;

      accumulator_image=MatrixToImage(accumulator,exception);
      if (accumulator_image != (Image *) NULL)
        AppendImageToList(&lines_image,accumulator_image);
    }
  /*
    Free resources.
  */
  accumulator=DestroyMatrixInfo(accumulator);
  image_info=DestroyImageInfo(image_info);
  (void) RelinquishUniqueFileResource(path);
  return(GetFirstImageInList(lines_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M e a n S h i f t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MeanShiftImage() delineate arbitrarily shaped clusters in the image. For
% each pixel, it visits all the pixels in the neighborhood specified by
% the window centered at the pixel and excludes those that are outside the
% radius=(window-1)/2 surrounding the pixel. From those pixels, it finds those
% that are within the specified color distance from the current mean, and
% computes a new x,y centroid from those coordinates and a new mean. This new
% x,y centroid is used as the center for a new window. This process iterates
% until it converges and the final mean replaces the (original window
% center) pixel value. It repeats this process for the next pixel, etc.,
% until it processes all pixels in the image. Results are typically better with
% colorspaces other than sRGB. We recommend YIQ, YUV or YCbCr.
%
% The format of the MeanShiftImage method is:
%
% Image *MeanShiftImage(const Image *image,const size_t width,
% const size_t height,const double color_distance,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width, height: find pixels in this neighborhood.
%
% o color_distance: the color distance.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MeanShiftImage(const Image *image,const size_t width,
  const size_t height,const double color_distance,ExceptionInfo *exception)
{
#define MaxMeanShiftIterations  100
#define MeanShiftImageTag  "MeanShift/Image"

  CacheView
    *image_view,
    *mean_view,
    *pixel_view;

  Image
    *mean_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    The result is a direct-class copy of the input; each pixel is replaced
    below by its converged mean-shift color.
  */
  mean_image=CloneImage(image,0,0,MagickTrue,exception);
  if (mean_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(mean_image,DirectClass,exception) == MagickFalse)
    {
      mean_image=DestroyImage(mean_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  pixel_view=AcquireVirtualCacheView(image,exception);
  mean_view=AcquireAuthenticCacheView(mean_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status,progress) \
    magick_number_threads(mean_image,mean_image,mean_image->rows,1)
#endif
  for (y=0; y < (ssize_t) mean_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(mean_view,0,y,mean_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) mean_image->columns; x++)
    {
      PixelInfo
        mean_pixel,
        previous_pixel;

      PointInfo
        mean_location,
        previous_location;

      register ssize_t
        i;

      /*
        Start the window centered at the current pixel with its own color as
        the initial mean.
      */
      GetPixelInfo(image,&mean_pixel);
      GetPixelInfoPixel(image,p,&mean_pixel);
      mean_location.x=(double) x;
      mean_location.y=(double) y;
      for (i=0; i < MaxMeanShiftIterations; i++)
      {
        double
          distance,
          gamma;

        PixelInfo
          sum_pixel;

        PointInfo
          sum_location;

        ssize_t
          count,
          v;

        sum_location.x=0.0;
        sum_location.y=0.0;
        GetPixelInfo(image,&sum_pixel);
        previous_location=mean_location;
        previous_pixel=mean_pixel;
        count=0;
        for (v=(-((ssize_t) height/2)); v <= (((ssize_t) height/2)); v++)
        {
          ssize_t
            u;

          for (u=(-((ssize_t) width/2)); u <= (((ssize_t) width/2)); u++)
          {
            /*
              Restrict the square window to a roughly circular region:
              u^2+v^2 <= (width/2)*(height/2).
            */
            if ((v*v+u*u) <= (ssize_t) ((width/2)*(height/2)))
              {
                PixelInfo
                  pixel;

                /*
                  NOTE(review): this assignment overwrites the shared
                  `status` flag from inside the parallel region -- confirm
                  this is intended.
                */
                status=GetOneCacheViewVirtualPixelInfo(pixel_view,(ssize_t)
                  MagickRound(mean_location.x+u),(ssize_t) MagickRound(
                  mean_location.y+v),&pixel,exception);
                /*
                  Accumulate neighbors whose squared RGB distance from the
                  running mean is within color_distance^2.
                */
                distance=(mean_pixel.red-pixel.red)*(mean_pixel.red-pixel.red)+
                  (mean_pixel.green-pixel.green)*(mean_pixel.green-pixel.green)+
                  (mean_pixel.blue-pixel.blue)*(mean_pixel.blue-pixel.blue);
                if (distance <= (color_distance*color_distance))
                  {
                    sum_location.x+=mean_location.x+u;
                    sum_location.y+=mean_location.y+v;
                    sum_pixel.red+=pixel.red;
                    sum_pixel.green+=pixel.green;
                    sum_pixel.blue+=pixel.blue;
                    sum_pixel.alpha+=pixel.alpha;
                    count++;
                  }
              }
          }
        }
        /*
          Re-center on the centroid of the matching pixels.
          NOTE(review): if no neighbor matched (count == 0), gamma becomes
          Inf and the mean degenerates to NaN -- confirm count >= 1 always
          holds here.
        */
        gamma=1.0/count;
        mean_location.x=gamma*sum_location.x;
        mean_location.y=gamma*sum_location.y;
        mean_pixel.red=gamma*sum_pixel.red;
        mean_pixel.green=gamma*sum_pixel.green;
        mean_pixel.blue=gamma*sum_pixel.blue;
        mean_pixel.alpha=gamma*sum_pixel.alpha;
        /*
          Converged when the squared spatial shift plus the squared color
          shift (color deltas scaled to a 0..255 range) is at most 3.
        */
        distance=(mean_location.x-previous_location.x)*
          (mean_location.x-previous_location.x)+
          (mean_location.y-previous_location.y)*
          (mean_location.y-previous_location.y)+
          255.0*QuantumScale*(mean_pixel.red-previous_pixel.red)*
          255.0*QuantumScale*(mean_pixel.red-previous_pixel.red)+
          255.0*QuantumScale*(mean_pixel.green-previous_pixel.green)*
          255.0*QuantumScale*(mean_pixel.green-previous_pixel.green)+
          255.0*QuantumScale*(mean_pixel.blue-previous_pixel.blue)*
          255.0*QuantumScale*(mean_pixel.blue-previous_pixel.blue);
        if (distance <= 3.0)
          break;
      }
      /*
        Replace the original pixel with the converged mean color.
      */
      SetPixelRed(mean_image,ClampToQuantum(mean_pixel.red),q);
      SetPixelGreen(mean_image,ClampToQuantum(mean_pixel.green),q);
      SetPixelBlue(mean_image,ClampToQuantum(mean_pixel.blue),q);
      SetPixelAlpha(mean_image,ClampToQuantum(mean_pixel.alpha),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(mean_image);
    }
    if (SyncCacheViewAuthenticPixels(mean_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,MeanShiftImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  mean_view=DestroyCacheView(mean_view);
  pixel_view=DestroyCacheView(pixel_view);
  image_view=DestroyCacheView(image_view);
  return(mean_image);
}
|
GB_binop__div_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__div_int32)
// A.*B function (eWiseMult): GB (_AemultB_08__div_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__div_int32)
// A.*B function (eWiseMult): GB (_AemultB_04__div_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__div_int32)
// A*D function (colscale): GB (_AxD__div_int32)
// D*A function (rowscale): GB (_DxB__div_int32)
// C+=B function (dense accum): GB (_Cdense_accumB__div_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__div_int32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__div_int32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__div_int32)
// C=scalar+B GB (_bind1st__div_int32)
// C=scalar+B' GB (_bind1st_tran__div_int32)
// C=A+scalar GB (_bind2nd__div_int32)
// C=A'+scalar GB (_bind2nd_tran__div_int32)
// C type: int32_t
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = GB_IDIV_SIGNED (aij, bij, 32)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int32_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int32_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IDIV_SIGNED (x, y, 32) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_DIV || GxB_NO_INT32 || GxB_NO_DIV_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense.  Per the generator, the op is
// one of MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, RDIV; no GB_DISABLE
// guard is emitted for this kernel.
void GB (_Cdense_ewise3_accum__div_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense (no accumulator).
GrB_Info GB (_Cdense_ewise3_noaccum__div_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix.
GrB_Info GB (_Cdense_accumB__div_int32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.
GrB_Info GB (_Cdense_accumb__div_int32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable -- the block above always returns; kept as
    // emitted by the code generator.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column scale with a diagonal matrix D.
GrB_Info GB (_AxD__div_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row scale with a diagonal matrix D.
GrB_Info GB (_DxB__div_int32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B.
GrB_Info GB (_AaddB__div_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces for slicing M, A, and B; freed by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper.
GrB_Info GB (_AemultB_08__div_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
GrB_Info GB (_AemultB_02__div_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B where M is sparse/hyper and A and B are bitmap/full.
GrB_Info GB (_AemultB_04__div_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap.
GrB_Info GB (_AemultB_bitmap__div_int32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the binary operator with scalar x bound to the
// first input, elementwise over B.
GrB_Info GB (_bind1st__div_int32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t x = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries GBB reports as not present in B
        if (!GBB (Bb, p)) continue ;
        int32_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_IDIV_SIGNED (x, bij, 32) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the binary operator with scalar y bound to the
// second input, elementwise over A.
GrB_Info GB (_bind2nd__div_int32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t y = (*((int32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries GBB reports as not present in A
        if (!GBB (Ab, p)) continue ;
        int32_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_IDIV_SIGNED (aij, y, 32) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    int32_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = GB_IDIV_SIGNED (x, aij, 32) ; \
}

// C = op (x, A'): transpose A and apply the binary operator with scalar x
// bound to the first input.
GrB_Info GB (_bind1st_tran__div_int32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code emitted after this kernel
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
// per-entry operation applied by the textual template below:
// Cx [pC] = Ax [pA] / y  (no typecasting, despite the macro name)
#define GB_CAST_OP(pC,pA)  \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_SIGNED (aij, y, 32) ; \
}
// C = op (A', y): the transpose and the bind2nd apply are fused; the loop
// structure lives in the included template GB_unop_transpose.c.
GrB_Info GB (_bind2nd_tran__div_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
falcon.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <mkl.h>
#include <string.h>
#include <hbwmalloc.h>
#include <assert.h>
#include <falcon.h>
const long MAX_TILES = (MAX_IROWS-2)*(MAX_IROWS-2)*0.25;
// STRIDE is the max image*C*batch for image
const long STRIDE = (MAX_BATCH)*(MAX_IMAGE_CHANNELS+18)*(MAX_TILES+13);
// FSTRIDE is the max C*K for filter
const long FSTRIDE = (MAX_FILTER_CHANNELS+1)*(MAX_FILTERS+1);
float* t_filter;
float* t_image;
float* c_out;
// setup scratch memory used in the algorithm
/* Allocate the three global scratch buffers (transformed filter, transformed
 * image tiles, GEMM output) in high-bandwidth memory, 64-byte aligned.
 * Fix: hbw_posix_memalign reports failure through its return value and
 * leaves the pointer unspecified on failure, so the original's pointer-only
 * asserts (which also ignored `ret`) were unreliable; check ret == 0. */
void falcon_init_lib(){
    int ret;
    ret = hbw_posix_memalign((void*)&t_filter, 64, 16*FSTRIDE*sizeof(float));
    assert(ret == 0 && t_filter != NULL);
    ret = hbw_posix_memalign((void*)&t_image, 64, 16*STRIDE*sizeof(float));
    assert(ret == 0 && t_image != NULL);
    ret = hbw_posix_memalign((void*)&c_out, 64, 16*STRIDE*sizeof(float));
    assert(ret == 0 && c_out != NULL);
    (void)ret; /* keep ret "used" when NDEBUG compiles the asserts away */
}
// free up the scratch pad
void falcon_free_lib(){
hbw_free(t_filter);
hbw_free(t_image);
hbw_free(c_out);
}
// INTERNAL FUNCTION : FORM MATRIX A from input data, also includes transformation
static void get_tiles(const float* restrict image, const int ldi, const int irows, const int sizeI, const int C, float* restrict otile, const int N, const int ntiles){
int t, u;
#pragma omp parallel for
for(t = 0; t < N*C; t++){
int i, j, x;
float tmp[16] __attribute__((aligned(64)));
float s[16] __attribute__((aligned(64)));
const float* data = image+t*sizeI;
int tile_count = t*ntiles;
// work on one image plane at a time, irrespective of the order
for(i = 0; i < irows-2; i += 2){
#pragma unroll(4)
for(j = 0; j < (irows-2); j += 2){
tmp[0 :4] =data[(i+0)*ldi+j:4];
tmp[4 :4] =data[(i+1)*ldi+j:4];
tmp[8 :4] =data[(i+2)*ldi+j:4];
tmp[12:4] =data[(i+3)*ldi+j:4];
// The tranformation manually simplified
s[0 ] =(tmp[0] - tmp[8 ]) - (tmp[2 ]- tmp[10]);
s[1 ] =(tmp[1] - tmp[9 ]) + (tmp[2 ]- tmp[10]);
s[2 ] =(tmp[2] - tmp[10]) - (tmp[1 ]- tmp[9 ]);
s[3 ] =(tmp[1] - tmp[9 ]) - (tmp[3 ]- tmp[11]);
s[4 ] =(tmp[4] + tmp[8 ]) - (tmp[6 ]+ tmp[10]);
s[5 ] =(tmp[5] + tmp[9 ]) + (tmp[6 ]+ tmp[10]);
s[6 ] =(tmp[6] + tmp[10]) - (tmp[5 ]+ tmp[9 ]);
s[7 ] =(tmp[5] + tmp[9 ]) - (tmp[7 ]+ tmp[11]);
s[8 ] =(tmp[8] - tmp[4 ]) - (tmp[10]- tmp[6 ]);
s[9 ] =(tmp[9] - tmp[5 ]) + (tmp[10]- tmp[6 ]);
s[10] =(tmp[10]- tmp[6 ]) - (tmp[9 ]- tmp[5 ]);
s[11] =(tmp[9] - tmp[5 ]) - (tmp[11]- tmp[7 ]);
s[12] =(tmp[4] - tmp[12]) - (tmp[6 ]- tmp[14]);
s[13] =(tmp[5] - tmp[13]) + (tmp[6 ]- tmp[14]);
s[14] =(tmp[6] - tmp[14]) - (tmp[5 ]- tmp[13]);
s[15] =(tmp[5] - tmp[13]) - (tmp[7 ]- tmp[15]);
// manually unrolled scatter to get max performance
otile[tile_count+0*STRIDE ] = s[0 ];
otile[tile_count+1*STRIDE ] = s[1 ];
otile[tile_count+2*STRIDE ] = s[2 ];
otile[tile_count+3*STRIDE ] = s[3 ];
otile[tile_count+4*STRIDE ] = s[4 ];
otile[tile_count+5*STRIDE ] = s[5 ];
otile[tile_count+6*STRIDE ] = s[6 ];
otile[tile_count+7*STRIDE ] = s[7 ];
otile[tile_count+8*STRIDE ] = s[8 ];
otile[tile_count+9*STRIDE ] = s[9 ];
otile[tile_count+10*STRIDE] = s[10];
otile[tile_count+11*STRIDE] = s[11];
otile[tile_count+12*STRIDE] = s[12];
otile[tile_count+13*STRIDE] = s[13];
otile[tile_count+14*STRIDE] = s[14];
otile[tile_count+15*STRIDE] = s[15];
tile_count++;
}
}
}
}
// INTERNAL FUNCTION: FORM MATRIX B, also includes filter transform
static void filter_transform(const float* restrict filter, const int C, const int K, float* restrict out){
int m, n, x;
const float *F;
#pragma omp parallel for collapse(2) private(m, n, x, F)
for(m = 0; m < K; m++){
for(n = 0; n < C; n++){
float c1[16] __attribute__((aligned(64)));
F = filter+n*3*3 + m*3*3*C;
// work on in 3x3 plane at a time
// The tranformation manually simplified
c1[0] = F[0];
c1[1] = (F[0]+F[2]+F[1])*0.5f;
c1[2] = (F[0]+F[2]-F[1])*0.5f;
c1[3] = F[2];
c1[4] = (F[0]+F[6]+F[3])*0.5f;
c1[5] = ((F[0]+F[6]+F[3])+(F[2]+F[8]+F[5])+(F[1]+F[7]+F[4]))*0.25f;
c1[6] = ((F[0]+F[6]+F[3])+(F[2]+F[8]+F[5])-(F[1]+F[7]+F[4]))*0.25f;
c1[7] = (F[2]+F[8]+F[5])*0.5f;
c1[8] = (F[0]+F[6]-F[3])*0.5f;
c1[9] = ((F[0]+F[6]-F[3])+(F[2]+F[8]-F[5])+(F[1]+F[7]-F[4]))*0.25f;
c1[10] = ((F[0]+F[6]-F[3])+(F[2]+F[8]-F[5])-(F[1]+F[7]-F[4]))*0.25f;
c1[11] = (F[2]+F[8]-F[5])*0.5f;
c1[12] = F[6];
c1[13] = (F[6]+F[8]+F[7])*0.5f;
c1[14] = (F[6]+F[8]-F[7])*0.5f;
c1[15] = F[8];
// scatter
#pragma unroll(16)
for(x = 0; x < 16; x++){
out[x*FSTRIDE+m*C+n] = c1[x];
}
}
}
}
// INTERNAL FUNCTION
// GEMM specific to Ist layer of VGG with (M, N, K) = (12544, 64, 3)
// MKL performs bad
// INTERNAL FUNCTION
// GEMM specific to Ist layer of VGG with (M, N, K) = (12544, 64, 3)
// MKL performs bad
//
// Computes c[x + z*ldc + i] = sum_{y=0..2} a[x+i + y*lda] * b[y + z*ldb]
// for i in [0, 16), i.e. C = A*B with the inner (k) dimension hard-coded
// to 3 (the `k` parameter is accepted for signature compatibility but,
// as in the original, the loop always runs 3 iterations).
// Requires m to be a multiple of 16.
// Fixes: removed the unused local `xx`, and replaced the Intel Cilk Plus
// array-notation statements (p[0:BLK] = ...) with equivalent standard C
// loops so the kernel is portable beyond the (discontinued) Cilk Plus
// compilers; results are bit-identical.
static void gemm_ker(int m, int n, int k, const float* a, const int lda, const float* b, const int ldb, float* c, const int ldc){
    const int BLK = 16;
    int x, y, z, i;
    for(z = 0; z < n; z++){
        for(x = 0; x < m; x += BLK){
            float p[16] __attribute__((aligned(64)));
            for(i = 0; i < BLK; i++) p[i] = 0.0f;
            #pragma unroll(3)
            for(y = 0; y < 3; y++){
                for(i = 0; i < BLK; i++){
                    p[i] += a[x+i+y*lda]*b[y+z*ldb];
                }
            }
            for(i = 0; i < BLK; i++) c[x+z*ldc+i] = p[i];
        }
    }
}
// INTERNAL FUNCTION
// C = A*B with beta = 0.0f and alpha = 1.0f
// Number of gemm calls is 16*BATCH
// INTERNAL FUNCTION
// C = A*B with beta = 0.0f and alpha = 1.0f
// Number of gemm calls is 16*BATCH
// One independent GEMM per (tile-element plane i, batch item t): multiplies
// the transformed image plane (irows x icols) by the transformed filter
// plane (frows x fcols).  Planes are STRIDE apart in `image`/`out` and
// FSTRIDE apart in `filter`.  The icols == 3 case (first layer) uses the
// hand-written gemm_ker; otherwise MKL sgemm (Fortran interface: pointers
// to scalars, column-major with the leading dimensions passed here).
static void batched_gemm(const float* restrict image, const int irows, const int icols, const float* restrict filter, const int frows, const int fcols, float* restrict out, const int batch){
int t, i;
const char trans ='n';
const float alpha = 1.0;
const float beta = 0.0;
const int ldi = irows;
const int ldf = frows;
const int ldo = irows;
#pragma omp parallel for collapse(2) private(t, i)
for(i = 0; i < 16; i++){
for(t = 0; t < batch; t++){
const float* im = image+i*STRIDE+t*irows*icols;
const float* fi = filter+i*FSTRIDE;
float* ot = out+i*STRIDE+t*irows*fcols;
if(icols == 3) gemm_ker(irows, fcols, icols, im, ldi, fi, ldf, ot, ldo);
else sgemm(&trans, &trans, &irows, &fcols, &icols, &alpha, im, &ldi, fi, &ldf, &beta, ot, &ldo);
}
}
}
// Inverse (output) transform: for every (image, output channel) plane,
// gather each 16-element GEMM-result tile (one element per STRIDE-spaced
// plane of `d`), reduce it to a 2x2 output patch with the manually
// simplified transform, and store the patch into the oH x oW output plane.
static void out_transform(const float* restrict d, const int K, const int ntiles, float* restrict out, const int ldo, const int oH, const int oW, const int N){
    int t;
    int sizeO = oH*oW;
    #pragma omp parallel for
    for(t = 0; t < N*K; t++){
        float tile[16] __attribute__((aligned(64)));
        float row[8] __attribute__((aligned(64)));
        float patch[4] __attribute__((aligned(64)));
        float* plane = out + t*sizeO;
        int tile_offset = t*ntiles;
        int i, j, e;
        // one output plane per t, tiles consumed in row-major patch order
        for(i = 0; i < oH; i += 2){
            for(j = 0; j < oW; j += 2){
                // gather the 16 tile elements, one per STRIDE-spaced plane
                for(e = 0; e < 16; e++){
                    tile[e] = d[tile_offset + e*STRIDE];
                }
                // column reduction of the 4x4 tile ...
                row[0] = tile[0]+tile[1]+ tile[2];
                row[1] = tile[1]-tile[2]- tile[3];
                row[2] = tile[4]+tile[5]+ tile[6];
                row[3] = tile[5]-tile[6]- tile[7];
                row[4] = tile[8]+tile[9]+ tile[10];
                row[5] = tile[9]-tile[10]- tile[11];
                row[6] = tile[12]+tile[13]+ tile[14];
                row[7] = tile[13]-tile[14]- tile[15];
                // ... then row reduction down to the 2x2 patch
                patch[0] = row[0]+row[2]+row[4];
                patch[1] = row[1]+row[3]+row[5];
                patch[2] = row[2]-row[4]-row[6];
                patch[3] = row[3]-row[5]-row[7];
                plane[i*ldo+j]       = patch[0];
                plane[i*ldo+j+1]     = patch[1];
                plane[(i+1)*ldo+j]   = patch[2];
                plane[(i+1)*ldo+j+1] = patch[3];
                tile_offset++;
            }
        }
    }
}
// Top-level 3x3 convolution: filter transform -> input tile transform ->
// 16 batched GEMMs -> output transform, using the global scratch buffers
// allocated by falcon_init_lib.
// NOTE(review): `tiles` is computed in double precision (*0.5), relying on
// irows-2 being even -- confirm callers only pass even output sizes.
// NOTE(review): M appears to merge M tiles-blocks per GEMM (batched_gemm is
// called with M*tiles rows but batch/M GEMM batches) -- verify M divides
// batch.
void fal_conv(const int M, float* restrict image, const int irows, const int C, float* restrict filter, const int K, const int batch, float* restrict out){
const int outHeight = irows-2;
const int outWidth = irows-2;
const int sizeI = irows*irows;
const int tiles = (outHeight)*0.5*(outWidth)*0.5;
filter_transform(filter, C, K, t_filter);
get_tiles(image, irows, irows, sizeI, C, t_image, batch, tiles);
batched_gemm(t_image, M*tiles, C, t_filter, C, K, c_out, batch/M);
out_transform(c_out, K, tiles, out, outWidth, outHeight, outWidth, batch);
}
|
geo_particle_iter.kernel_runtime.c | #include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include "local_header.h"
#include "openmp_pscmc_inc.h"
#include "geo_particle_iter.kernel_inc.h"
/* Boilerplate entry points for the backward (bwd) kernel: no per-kernel
 * initialization is needed, one compute unit per OpenMP thread, and the
 * x extent of a work unit is always 1. */
int openmp_geo_rel_1st_bwd_init (openmp_pscmc_env * pe ,openmp_geo_rel_1st_bwd_struct * kerstr ){
    (void)pe;
    (void)kerstr;
    return 0;
}
void openmp_geo_rel_1st_bwd_get_struct_len (size_t * len ){
    /* report the size of the kernel-argument struct */
    *len = sizeof(openmp_geo_rel_1st_bwd_struct);
}
int openmp_geo_rel_1st_bwd_get_num_compute_units (openmp_geo_rel_1st_bwd_struct * kerstr ){
    (void)kerstr;
    return omp_get_max_threads();
}
int openmp_geo_rel_1st_bwd_get_xlen (){
    return 1;
}
/* Run the backward kernel over the whole xlen x ylen work grid.  Rows (yid)
 * are dealt out round-robin across the OpenMP team (yid = tid, tid+numt, ...)
 * and each thread walks every xid column of its rows.  Always returns 0.
 * Fix: the original also computed ysingle/ymin/ymax for a blocked partition
 * but never used them; those dead locals are removed. */
int openmp_geo_rel_1st_bwd_exec (openmp_geo_rel_1st_bwd_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
{
    int xid;
    int yid;
    int numt = omp_get_num_threads();
    int tid = omp_get_thread_num();
    for (yid = tid; yid < scmc_internal_g_ylen; yid += numt)
    {
        for (xid = 0; xid < scmc_internal_g_xlen; xid++)
        {
            openmp_geo_rel_1st_bwd_scmc_kernel ( ( kerstr )->inoutput , ( kerstr )->xyzw , ( kerstr )->cu_cache , ( kerstr )->cu_xyzw , ( kerstr )->xoffset , ( kerstr )->yoffset , ( kerstr )->zoffset , ( kerstr )->fieldE , ( kerstr )->fieldB , ( kerstr )->fieldB1 , ( kerstr )->FoutJ , ( ( kerstr )->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr )->ovlp)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr )->num_ele)[0] , ( ( kerstr )->grid_cache_len)[0] , ( ( kerstr )->cu_cache_length)[0] , ( ( kerstr )->DELTA_X)[0] , ( ( kerstr )->DELTA_Y)[0] , ( ( kerstr )->DELTA_Z)[0] , ( ( kerstr )->Mass0)[0] , ( ( kerstr )->Charge0)[0] , ( ( kerstr )->Deltat)[0] , ( ( kerstr )->Tori_X0)[0] , ( ( kerstr )->Solve_Err)[0] , yid , scmc_internal_g_ylen );
        }
    }
}
return 0;
}
/* Parameter setters for the backward kernel: each attaches one device
 * buffer (pm->d_data) to the corresponding kernel-argument slot.
 * Fix: every setter is declared `int` but the originals fell off the end
 * without a return statement (undefined behavior if a caller reads the
 * result); each now returns 0. */
int openmp_geo_rel_1st_bwd_scmc_set_parameter_inoutput (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->inoutput = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_scmc_set_parameter_xyzw (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xyzw = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_scmc_set_parameter_cu_cache (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_cache = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_scmc_set_parameter_cu_xyzw (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_xyzw = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_scmc_set_parameter_xoffset (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xoffset = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_scmc_set_parameter_yoffset (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->yoffset = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_scmc_set_parameter_zoffset (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->zoffset = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_scmc_set_parameter_fieldE (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->fieldE = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_scmc_set_parameter_fieldB (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->fieldB = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_scmc_set_parameter_fieldB1 (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->fieldB1 = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_scmc_set_parameter_FoutJ (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->FoutJ = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_scmc_set_parameter_XLEN (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->XLEN = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_scmc_set_parameter_YLEN (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->YLEN = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_scmc_set_parameter_ZLEN (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ZLEN = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_scmc_set_parameter_ovlp (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ovlp = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_scmc_set_parameter_numvec (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->numvec = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_scmc_set_parameter_num_ele (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_scmc_set_parameter_grid_cache_len (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->grid_cache_len = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_scmc_set_parameter_cu_cache_length (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_cache_length = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_scmc_set_parameter_DELTA_X (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_X = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_scmc_set_parameter_DELTA_Y (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_Y = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_scmc_set_parameter_DELTA_Z (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_Z = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_scmc_set_parameter_Mass0 (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Mass0 = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_scmc_set_parameter_Charge0 (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Charge0 = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_scmc_set_parameter_Deltat (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Deltat = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_scmc_set_parameter_Tori_X0 (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Tori_X0 = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_bwd_scmc_set_parameter_Solve_Err (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Solve_Err = pm->d_data;
    return 0;
}
/* Boilerplate entry points for the forward (fwd) kernel: no per-kernel
 * initialization is needed, one compute unit per OpenMP thread, and the
 * x extent of a work unit is always 1. */
int openmp_geo_rel_1st_fwd_init (openmp_pscmc_env * pe ,openmp_geo_rel_1st_fwd_struct * kerstr ){
    (void)pe;
    (void)kerstr;
    return 0;
}
void openmp_geo_rel_1st_fwd_get_struct_len (size_t * len ){
    /* report the size of the kernel-argument struct */
    *len = sizeof(openmp_geo_rel_1st_fwd_struct);
}
int openmp_geo_rel_1st_fwd_get_num_compute_units (openmp_geo_rel_1st_fwd_struct * kerstr ){
    (void)kerstr;
    return omp_get_max_threads();
}
int openmp_geo_rel_1st_fwd_get_xlen (){
    return 1;
}
/* Run the forward kernel over the whole xlen x ylen work grid.  Rows (yid)
 * are dealt out round-robin across the OpenMP team (yid = tid, tid+numt, ...)
 * and each thread walks every xid column of its rows.  Always returns 0.
 * Fix: the original also computed ysingle/ymin/ymax for a blocked partition
 * but never used them; those dead locals are removed. */
int openmp_geo_rel_1st_fwd_exec (openmp_geo_rel_1st_fwd_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
{
    int xid;
    int yid;
    int numt = omp_get_num_threads();
    int tid = omp_get_thread_num();
    for (yid = tid; yid < scmc_internal_g_ylen; yid += numt)
    {
        for (xid = 0; xid < scmc_internal_g_xlen; xid++)
        {
            openmp_geo_rel_1st_fwd_scmc_kernel ( ( kerstr )->inoutput , ( kerstr )->xyzw , ( kerstr )->cu_cache , ( kerstr )->cu_xyzw , ( kerstr )->xoffset , ( kerstr )->yoffset , ( kerstr )->zoffset , ( kerstr )->fieldE , ( kerstr )->fieldB , ( kerstr )->fieldB1 , ( kerstr )->FoutJ , ( ( kerstr )->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr )->ovlp)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr )->num_ele)[0] , ( ( kerstr )->grid_cache_len)[0] , ( ( kerstr )->cu_cache_length)[0] , ( ( kerstr )->DELTA_X)[0] , ( ( kerstr )->DELTA_Y)[0] , ( ( kerstr )->DELTA_Z)[0] , ( ( kerstr )->Mass0)[0] , ( ( kerstr )->Charge0)[0] , ( ( kerstr )->Deltat)[0] , ( ( kerstr )->Tori_X0)[0] , ( ( kerstr )->Solve_Err)[0] , yid , scmc_internal_g_ylen );
        }
    }
}
return 0;
}
/* Parameter setters for the forward kernel: each attaches one device
 * buffer (pm->d_data) to the corresponding kernel-argument slot.
 * Fix: every setter is declared `int` but the originals fell off the end
 * without a return statement (undefined behavior if a caller reads the
 * result); each now returns 0. */
int openmp_geo_rel_1st_fwd_scmc_set_parameter_inoutput (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->inoutput = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_fwd_scmc_set_parameter_xyzw (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xyzw = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_fwd_scmc_set_parameter_cu_cache (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_cache = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_fwd_scmc_set_parameter_cu_xyzw (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_xyzw = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_fwd_scmc_set_parameter_xoffset (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xoffset = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_fwd_scmc_set_parameter_yoffset (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->yoffset = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_fwd_scmc_set_parameter_zoffset (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->zoffset = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_fwd_scmc_set_parameter_fieldE (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->fieldE = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_fwd_scmc_set_parameter_fieldB (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->fieldB = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_fwd_scmc_set_parameter_fieldB1 (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->fieldB1 = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_fwd_scmc_set_parameter_FoutJ (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->FoutJ = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_fwd_scmc_set_parameter_XLEN (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->XLEN = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_fwd_scmc_set_parameter_YLEN (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->YLEN = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_fwd_scmc_set_parameter_ZLEN (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ZLEN = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_fwd_scmc_set_parameter_ovlp (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ovlp = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_fwd_scmc_set_parameter_numvec (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->numvec = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_fwd_scmc_set_parameter_num_ele (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_fwd_scmc_set_parameter_grid_cache_len (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->grid_cache_len = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_fwd_scmc_set_parameter_cu_cache_length (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->cu_cache_length = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_fwd_scmc_set_parameter_DELTA_X (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_X = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_fwd_scmc_set_parameter_DELTA_Y (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_Y = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_fwd_scmc_set_parameter_DELTA_Z (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DELTA_Z = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_fwd_scmc_set_parameter_Mass0 (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Mass0 = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_fwd_scmc_set_parameter_Charge0 (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Charge0 = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_fwd_scmc_set_parameter_Deltat (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Deltat = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_fwd_scmc_set_parameter_Tori_X0 (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Tori_X0 = pm->d_data;
    return 0;
}
int openmp_geo_rel_1st_fwd_scmc_set_parameter_Solve_Err (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Solve_Err = pm->d_data;
    return 0;
}
|
nvector_openmpdev.c | /* -----------------------------------------------------------------
* Programmer(s): David J. Gardner and Shelby Lockhart @ LLNL
* -----------------------------------------------------------------
* Acknowledgements: This NVECTOR module is based on the NVECTOR
* Serial module by Scott D. Cohen, Alan C.
* Hindmarsh, Radu Serban, and Aaron Collier
* @ LLNL
* -----------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2022, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
* -----------------------------------------------------------------
* This is the implementation file for an OpenMP DEV implementation
* of the NVECTOR module.
* -----------------------------------------------------------------*/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <nvector/nvector_openmpdev.h>
#include <sundials/sundials_math.h>
#define ZERO RCONST(0.0)
#define HALF RCONST(0.5)
#define ONE RCONST(1.0)
#define ONEPT5 RCONST(1.5)
/* Private functions for special cases of vector operations */
static void VCopy_OpenMPDEV(N_Vector x, N_Vector z); /* z=x */
static void VSum_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z); /* z=x+y */
static void VDiff_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z); /* z=x-y */
static void VNeg_OpenMPDEV(N_Vector x, N_Vector z); /* z=-x */
static void VScaleSum_OpenMPDEV(realtype c, N_Vector x, N_Vector y, N_Vector z); /* z=c(x+y) */
static void VScaleDiff_OpenMPDEV(realtype c, N_Vector x, N_Vector y, N_Vector z); /* z=c(x-y) */
static void VLin1_OpenMPDEV(realtype a, N_Vector x, N_Vector y, N_Vector z); /* z=ax+y */
static void VLin2_OpenMPDEV(realtype a, N_Vector x, N_Vector y, N_Vector z); /* z=ax-y */
static void Vaxpy_OpenMPDEV(realtype a, N_Vector x, N_Vector y); /* y <- ax+y */
static void VScaleBy_OpenMPDEV(realtype a, N_Vector x); /* x <- ax */
/* Private functions for special cases of vector array operations */
static int VSumVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=X+Y */
static int VDiffVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=X-Y */
static int VScaleSumVectorArray_OpenMPDEV(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=c(X+Y) */
static int VScaleDiffVectorArray_OpenMPDEV(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=c(X-Y) */
static int VLin1VectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=aX+Y */
static int VLin2VectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=aX-Y */
static int VaxpyVectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y); /* Y <- aX+Y */
/*
* -----------------------------------------------------------------
* exported functions
* -----------------------------------------------------------------
*/
/* ----------------------------------------------------------------
* Returns vector type ID. Used to identify vector implementation
* from abstract N_Vector interface.
*/
N_Vector_ID N_VGetVectorID_OpenMPDEV(N_Vector v)
{
/* Every vector made by this module reports the OPENMPDEV implementation
 * ID; the argument itself is not inspected. */
return SUNDIALS_NVEC_OPENMPDEV;
}
/* ----------------------------------------------------------------------------
* Function to create a new empty vector
*/
/* Create a length-`length` vector with the full OpenMPDEV ops table attached
 * but NO data buffers: host_data/dev_data are NULL and own_data is SUNFALSE.
 * Callers (N_VNew / N_VMake) attach buffers afterwards.  Returns NULL on
 * allocation failure. */
N_Vector N_VNewEmpty_OpenMPDEV(sunindextype length, SUNContext sunctx)
{
N_Vector v;
N_VectorContent_OpenMPDEV content;
/* Create an empty vector object */
v = NULL;
v = N_VNewEmpty(sunctx);
if (v == NULL) return(NULL);
/* Attach operations */
/* constructors, destructors, and utility operations */
v->ops->nvgetvectorid = N_VGetVectorID_OpenMPDEV;
v->ops->nvclone = N_VClone_OpenMPDEV;
v->ops->nvcloneempty = N_VCloneEmpty_OpenMPDEV;
v->ops->nvdestroy = N_VDestroy_OpenMPDEV;
v->ops->nvspace = N_VSpace_OpenMPDEV;
v->ops->nvgetlength = N_VGetLength_OpenMPDEV;
v->ops->nvgetarraypointer = N_VGetHostArrayPointer_OpenMPDEV;
v->ops->nvgetdevicearraypointer = N_VGetDeviceArrayPointer_OpenMPDEV;
v->ops->nvprint = N_VPrint_OpenMPDEV;
v->ops->nvprintfile = N_VPrintFile_OpenMPDEV;
/* standard vector operations */
v->ops->nvlinearsum = N_VLinearSum_OpenMPDEV;
v->ops->nvconst = N_VConst_OpenMPDEV;
v->ops->nvprod = N_VProd_OpenMPDEV;
v->ops->nvdiv = N_VDiv_OpenMPDEV;
v->ops->nvscale = N_VScale_OpenMPDEV;
v->ops->nvabs = N_VAbs_OpenMPDEV;
v->ops->nvinv = N_VInv_OpenMPDEV;
v->ops->nvaddconst = N_VAddConst_OpenMPDEV;
v->ops->nvdotprod = N_VDotProd_OpenMPDEV;
v->ops->nvmaxnorm = N_VMaxNorm_OpenMPDEV;
v->ops->nvwrmsnormmask = N_VWrmsNormMask_OpenMPDEV;
v->ops->nvwrmsnorm = N_VWrmsNorm_OpenMPDEV;
v->ops->nvmin = N_VMin_OpenMPDEV;
v->ops->nvwl2norm = N_VWL2Norm_OpenMPDEV;
v->ops->nvl1norm = N_VL1Norm_OpenMPDEV;
v->ops->nvcompare = N_VCompare_OpenMPDEV;
v->ops->nvinvtest = N_VInvTest_OpenMPDEV;
v->ops->nvconstrmask = N_VConstrMask_OpenMPDEV;
v->ops->nvminquotient = N_VMinQuotient_OpenMPDEV;
/* fused and vector array operations are disabled (NULL) by default */
/* local reduction operations: this module is not MPI-distributed, so the
 * "local" variants are just the full-vector operations */
v->ops->nvdotprodlocal = N_VDotProd_OpenMPDEV;
v->ops->nvmaxnormlocal = N_VMaxNorm_OpenMPDEV;
v->ops->nvminlocal = N_VMin_OpenMPDEV;
v->ops->nvl1normlocal = N_VL1Norm_OpenMPDEV;
v->ops->nvinvtestlocal = N_VInvTest_OpenMPDEV;
v->ops->nvconstrmasklocal = N_VConstrMask_OpenMPDEV;
v->ops->nvminquotientlocal = N_VMinQuotient_OpenMPDEV;
v->ops->nvwsqrsumlocal = N_VWSqrSumLocal_OpenMPDEV;
v->ops->nvwsqrsummasklocal = N_VWSqrSumMaskLocal_OpenMPDEV;
/* single buffer reduction operations */
v->ops->nvdotprodmultilocal = N_VDotProdMulti_OpenMPDEV;
/* Create content */
content = NULL;
content = (N_VectorContent_OpenMPDEV) malloc(sizeof *content);
if (content == NULL) { N_VDestroy(v); return(NULL); }
/* Attach content */
v->content = content;
/* Initialize content */
content->length = length;
content->own_data = SUNFALSE;
content->host_data = NULL;
content->dev_data = NULL;
return(v);
}
/* ----------------------------------------------------------------------------
* Function to create a new vector
*/
N_Vector N_VNew_OpenMPDEV(sunindextype length)
{
N_Vector v;
realtype *data;
realtype *dev_data;
int dev;
v = NULL;
v = N_VNewEmpty_OpenMPDEV(length);
if (v == NULL) return(NULL);
/* Create data */
if (length > 0) {
/* Update ownership */
NV_OWN_DATA_OMPDEV(v) = SUNTRUE;
/* Allocate memory on host */
data = NULL;
data = (realtype *) malloc(length * sizeof(realtype));
if (data == NULL) { N_VDestroy(v); return(NULL); }
/* Allocate memory on device */
dev = omp_get_default_device();
dev_data = omp_target_alloc(length * sizeof(realtype), dev);
if (dev_data == NULL) { N_VDestroy(v); return(NULL); }
/* Attach data */
NV_DATA_HOST_OMPDEV(v) = data;
NV_DATA_DEV_OMPDEV(v) = dev_data;
}
return(v);
}
/* ----------------------------------------------------------------------------
* Function to create a vector with user data component
*/
N_Vector N_VMake_OpenMPDEV(sunindextype length, realtype *h_vdata,
realtype *d_vdata)
{
N_Vector v;
int dev, host;
if (h_vdata == NULL || d_vdata == NULL) return(NULL);
v = NULL;
v = N_VNewEmpty_OpenMPDEV(length);
if (v == NULL) return(NULL);
if (length > 0) {
/* Get device and host identifiers */
dev = omp_get_default_device();
host = omp_get_initial_device();
/* Attach data */
NV_OWN_DATA_OMPDEV(v) = SUNFALSE;
NV_DATA_HOST_OMPDEV(v) = h_vdata;
NV_DATA_DEV_OMPDEV(v) = d_vdata;
}
return(v);
}
/* ----------------------------------------------------------------------------
* Function to create an array of new vectors.
*/
N_Vector *N_VCloneVectorArray_OpenMPDEV(int count, N_Vector w)
{
/* Thin wrapper kept for the OpenMPDEV API; delegates to the generic
 * array-clone helper. */
return(N_VCloneVectorArray(count, w));
}
/* ----------------------------------------------------------------------------
* Function to create an array of new vectors with NULL data array.
*/
N_Vector *N_VCloneVectorArrayEmpty_OpenMPDEV(int count, N_Vector w)
{
/* Thin wrapper: clones `count` vectors from w with NULL data arrays. */
return(N_VCloneEmptyVectorArray(count, w));
}
/* ----------------------------------------------------------------------------
* Function to free an array created with N_VCloneVectorArray_OpenMPDEV
*/
void N_VDestroyVectorArray_OpenMPDEV(N_Vector *vs, int count)
{
/* Thin wrapper: destroys the `count` vectors and the array itself. */
N_VDestroyVectorArray(vs, count);
return;
}
/* ----------------------------------------------------------------------------
* Function to return number of vector elements
*/
sunindextype N_VGetLength_OpenMPDEV(N_Vector v)
{
/* Number of elements (same on host and device). */
return NV_LENGTH_OMPDEV(v);
}
/* ----------------------------------------------------------------------------
* Function to return a pointer to the data array on the host.
*/
realtype *N_VGetHostArrayPointer_OpenMPDEV(N_Vector v)
{
/* Raw pointer to the HOST copy of the data (may be stale relative to the
 * device copy until N_VCopyFromDevice_OpenMPDEV is called). */
return((realtype *) NV_DATA_HOST_OMPDEV(v));
}
/* ----------------------------------------------------------------------------
* Function to return a pointer to the data array on the device.
*/
realtype *N_VGetDeviceArrayPointer_OpenMPDEV(N_Vector v)
{
/* Raw pointer to the DEVICE copy of the data; only valid for use on the
 * OpenMP target device. */
return((realtype *) NV_DATA_DEV_OMPDEV(v));
}
/* ----------------------------------------------------------------------------
* Function to print a vector to stdout
*/
void N_VPrint_OpenMPDEV(N_Vector x)
{
/* Convenience wrapper: print the host copy of x to stdout. */
N_VPrintFile_OpenMPDEV(x, stdout);
}
/* ----------------------------------------------------------------------------
* Function to print a vector to outfile
*/
void N_VPrintFile_OpenMPDEV(N_Vector x, FILE *outfile)
{
sunindextype i, N;
realtype *xd;
/* Prints the HOST copy of x, one entry per line, followed by a blank
 * line.  This routine does not synchronize with the device; call
 * N_VCopyFromDevice_OpenMPDEV first if the device copy is newer. */
xd = NULL;
N = NV_LENGTH_OMPDEV(x);
xd = NV_DATA_HOST_OMPDEV(x);
for (i = 0; i < N; i++) {
#if defined(SUNDIALS_EXTENDED_PRECISION)
STAN_SUNDIALS_FPRINTF(outfile, "%11.8Lg\n", xd[i]);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
STAN_SUNDIALS_FPRINTF(outfile, "%11.8g\n", xd[i]);
#else
/* single precision: float promotes to double in varargs, so %g is fine */
STAN_SUNDIALS_FPRINTF(outfile, "%11.8g\n", xd[i]);
#endif
}
STAN_SUNDIALS_FPRINTF(outfile, "\n");
return;
}
/* ----------------------------------------------------------------------------
* Function to copy host array into device array
*/
/* ----------------------------------------------------------------------------
 * Synchronize the device copy of x from its host copy (host -> device).
 */
void N_VCopyToDevice_OpenMPDEV(N_Vector x)
{
  sunindextype n;
  realtype *src;
  realtype *dst;
  int dst_dev, src_dev;

  /* Gather array information */
  n   = NV_LENGTH_OMPDEV(x);
  src = NV_DATA_HOST_OMPDEV(x);
  dst = NV_DATA_DEV_OMPDEV(x);

  /* Destination is the default target device; source is the host */
  dst_dev = omp_get_default_device();
  src_dev = omp_get_initial_device();

  omp_target_memcpy(dst, src, sizeof(realtype) * n, 0, 0, dst_dev, src_dev);
}
/* ----------------------------------------------------------------------------
* Function to copy device array into host array
*/
/* ----------------------------------------------------------------------------
 * Synchronize the host copy of x from its device copy (device -> host).
 */
void N_VCopyFromDevice_OpenMPDEV(N_Vector x)
{
  sunindextype n;
  realtype *src;
  realtype *dst;
  int dst_dev, src_dev;

  /* Gather array information */
  n   = NV_LENGTH_OMPDEV(x);
  dst = NV_DATA_HOST_OMPDEV(x);
  src = NV_DATA_DEV_OMPDEV(x);

  /* Destination is the host; source is the default target device */
  dst_dev = omp_get_initial_device();
  src_dev = omp_get_default_device();

  omp_target_memcpy(dst, src, sizeof(realtype) * n, 0, 0, dst_dev, src_dev);
}
/*
* -----------------------------------------------------------------
* implementation of vector operations
* -----------------------------------------------------------------
*/
/* ----------------------------------------------------------------------------
* Create new vector from existing vector without attaching data
*/
N_Vector N_VCloneEmpty_OpenMPDEV(N_Vector w)
{
  N_Vector v;
  N_VectorContent_OpenMPDEV content;

  if (w == NULL) return(NULL);

  /* Create an empty vector in the same SUNDIALS context as the template */
  v = N_VNewEmpty(w->sunctx);
  if (v == NULL) return(NULL);

  /* Copy the operations table from the template vector */
  if (N_VCopyOps(w, v)) { N_VDestroy(v); return(NULL); }

  /* Allocate and attach the content structure */
  content = (N_VectorContent_OpenMPDEV) malloc(sizeof *content);
  if (content == NULL) { N_VDestroy(v); return(NULL); }
  v->content = content;

  /* Same length as the template; no data attached and none owned */
  content->length    = NV_LENGTH_OMPDEV(w);
  content->own_data  = SUNFALSE;
  content->host_data = NULL;
  content->dev_data  = NULL;

  return(v);
}
/* ----------------------------------------------------------------------------
* Create new vector from existing vector and attach data
*/
N_Vector N_VClone_OpenMPDEV(N_Vector w)
{
  N_Vector v;
  realtype *data;
  realtype *dev_data;
  sunindextype length;
  int dev;

  v = NULL;
  v = N_VCloneEmpty_OpenMPDEV(w);
  if (v == NULL) return(NULL);

  length = NV_LENGTH_OMPDEV(w);

  /* Create data */
  if (length > 0) {

    /* Update ownership flag */
    NV_OWN_DATA_OMPDEV(v) = SUNTRUE;

    /* Allocate memory on host */
    data = NULL;
    data = (realtype *) malloc(length * sizeof(realtype));
    if (data == NULL) { N_VDestroy(v); return(NULL); }

    /* Attach the host buffer immediately so N_VDestroy can reclaim it if
       the device allocation below fails (previously the host buffer was
       leaked on that path because it was not yet attached) */
    NV_DATA_HOST_OMPDEV(v) = data;

    /* Allocate memory on device (cast needed when compiled as C++) */
    dev = omp_get_default_device();
    dev_data = (realtype *) omp_target_alloc(length * sizeof(realtype), dev);
    if (dev_data == NULL) { N_VDestroy(v); return(NULL); }

    /* Attach device data */
    NV_DATA_DEV_OMPDEV(v) = dev_data;
  }

  return(v);
}
/* ----------------------------------------------------------------------------
* Destroy vector and free vector memory
*/
void N_VDestroy_OpenMPDEV(N_Vector v)
{
  int dev_id;

  if (v == NULL) return;

  /* Release the content structure and, if owned, its data arrays */
  if (v->content != NULL) {
    if (NV_OWN_DATA_OMPDEV(v)) {
      /* host buffer */
      if (NV_DATA_HOST_OMPDEV(v) != NULL) {
        free(NV_DATA_HOST_OMPDEV(v));
        NV_DATA_HOST_OMPDEV(v) = NULL;
      }
      /* device buffer */
      if (NV_DATA_DEV_OMPDEV(v) != NULL) {
        dev_id = omp_get_default_device();
        omp_target_free(NV_DATA_DEV_OMPDEV(v), dev_id);
        NV_DATA_DEV_OMPDEV(v) = NULL;
      }
    }
    free(v->content);
    v->content = NULL;
  }

  /* Release the operations table and the vector structure itself */
  if (v->ops != NULL) { free(v->ops); v->ops = NULL; }
  free(v);
}
/* ----------------------------------------------------------------------------
* Get storage requirement for N_Vector
*/
void N_VSpace_OpenMPDEV(N_Vector v, sunindextype *lrw, sunindextype *liw)
{
  /* realtype words: one per element; integer words: the length field */
  *lrw = NV_LENGTH_OMPDEV(v);
  *liw = 1;
}
/* ----------------------------------------------------------------------------
* Compute linear combination z[i] = a*x[i]+b*y[i]
*/
void N_VLinearSum_OpenMPDEV(realtype a, N_Vector x, realtype b, N_Vector y, N_Vector z)
{
sunindextype i, N;
realtype c, *xd_dev, *yd_dev, *zd_dev;
N_Vector v1, v2;
booleantype test;
int dev;
xd_dev = yd_dev = zd_dev = NULL;
/* Special coefficient/aliasing cases dispatch to cheaper kernels; only the
   fully general a*x + b*y falls through to the loop at the bottom. */
if ((b == ONE) && (z == y)) { /* BLAS usage: axpy y <- ax+y */
Vaxpy_OpenMPDEV(a,x,y);
return;
}
if ((a == ONE) && (z == x)) { /* BLAS usage: axpy x <- by+x */
Vaxpy_OpenMPDEV(b,y,x);
return;
}
/* Case: a == b == 1.0 */
if ((a == ONE) && (b == ONE)) {
VSum_OpenMPDEV(x, y, z);
return;
}
/* Cases: (1) a == 1.0, b = -1.0, (2) a == -1.0, b == 1.0 */
if ((test = ((a == ONE) && (b == -ONE))) || ((a == -ONE) && (b == ONE))) {
v1 = test ? y : x;
v2 = test ? x : y;
VDiff_OpenMPDEV(v2, v1, z);
return;
}
/* Cases: (1) a == 1.0, b == other or 0.0, (2) a == other or 0.0, b == 1.0 */
/* if a or b is 0.0, then user should have called N_VScale */
if ((test = (a == ONE)) || (b == ONE)) {
c = test ? b : a;
v1 = test ? y : x;
v2 = test ? x : y;
VLin1_OpenMPDEV(c, v1, v2, z);
return;
}
/* Cases: (1) a == -1.0, b != 1.0, (2) a != 1.0, b == -1.0 */
if ((test = (a == -ONE)) || (b == -ONE)) {
c = test ? b : a;
v1 = test ? y : x;
v2 = test ? x : y;
VLin2_OpenMPDEV(c, v1, v2, z);
return;
}
/* Case: a == b */
/* catches case both a and b are 0.0 - user should have called N_VConst */
if (a == b) {
VScaleSum_OpenMPDEV(a, x, y, z);
return;
}
/* Case: a == -b */
if (a == -b) {
VScaleDiff_OpenMPDEV(a, x, y, z);
return;
}
/* Do all cases not handled above:
(1) a == other, b == 0.0 - user should have called N_VScale
(2) a == 0.0, b == other - user should have called N_VScale
(3) a,b == other, a !=b, a != -b */
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
yd_dev = NV_DATA_DEV_OMPDEV(y);
zd_dev = NV_DATA_DEV_OMPDEV(z);
/* get default device identifier */
dev = omp_get_default_device();
/* general case, offloaded: z[i] = a*x[i] + b*y[i] on device-resident data */
#pragma omp target is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
for (i = 0; i < N; i++)
zd_dev[i] = (a*xd_dev[i])+(b*yd_dev[i]);
return;
}
/* ----------------------------------------------------------------------------
* Assigns constant value to all vector elements, z[i] = c
*/
void N_VConst_OpenMPDEV(realtype c, N_Vector z)
{
sunindextype i, N;
realtype *zd_dev;
int dev;
zd_dev = NULL;
N = NV_LENGTH_OMPDEV(z);
/* operate on the device-resident array; the host copy is not touched */
zd_dev = NV_DATA_DEV_OMPDEV(z);
/* get default device identifier */
dev = omp_get_default_device();
/* offloaded fill: z[i] = c */
#pragma omp target is_device_ptr(zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
for (i = 0; i < N; i++) zd_dev[i] = c;
return;
}
/* ----------------------------------------------------------------------------
* Compute componentwise product z[i] = x[i]*y[i]
*/
void N_VProd_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z)
{
sunindextype i, N;
realtype *xd_dev, *yd_dev, *zd_dev;
int dev;
xd_dev = yd_dev = zd_dev = NULL;
N = NV_LENGTH_OMPDEV(x);
/* operate on device-resident arrays; host copies are not touched */
xd_dev = NV_DATA_DEV_OMPDEV(x);
yd_dev = NV_DATA_DEV_OMPDEV(y);
zd_dev = NV_DATA_DEV_OMPDEV(z);
/* get default device identifier */
dev = omp_get_default_device();
/* offloaded elementwise product: z[i] = x[i]*y[i] */
#pragma omp target is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
for (i = 0; i < N; i++)
zd_dev[i] = xd_dev[i]*yd_dev[i];
return;
}
/* ----------------------------------------------------------------------------
* Compute componentwise division z[i] = x[i]/y[i]
*/
void N_VDiv_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z)
{
sunindextype i, N;
realtype *xd_dev, *yd_dev, *zd_dev;
int dev;
xd_dev = yd_dev = zd_dev = NULL;
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
yd_dev = NV_DATA_DEV_OMPDEV(y);
zd_dev = NV_DATA_DEV_OMPDEV(z);
/* get default device identifier */
dev = omp_get_default_device();
/* offloaded elementwise quotient: z[i] = x[i]/y[i].
   No zero-denominator check here; N_VInvTest provides the checked form. */
#pragma omp target is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
for (i = 0; i < N; i++)
zd_dev[i] = xd_dev[i]/yd_dev[i];
return;
}
/* ----------------------------------------------------------------------------
* Compute scaler multiplication z[i] = c*x[i]
*/
void N_VScale_OpenMPDEV(realtype c, N_Vector x, N_Vector z)
{
sunindextype i, N;
realtype *xd_dev, *zd_dev;
int dev;
xd_dev = zd_dev = NULL;
/* aliasing and special coefficients dispatch to cheaper kernels */
if (z == x) { /* BLAS usage: scale x <- cx */
VScaleBy_OpenMPDEV(c, x);
return;
}
if (c == ONE) {
VCopy_OpenMPDEV(x, z);
} else if (c == -ONE) {
VNeg_OpenMPDEV(x, z);
} else {
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
zd_dev = NV_DATA_DEV_OMPDEV(z);
/* get default device identifier */
dev = omp_get_default_device();
/* general case, offloaded: z[i] = c*x[i] */
#pragma omp target is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
for (i = 0; i < N; i++)
zd_dev[i] = c*xd_dev[i];
}
return;
}
/* ----------------------------------------------------------------------------
* Compute absolute value of vector components z[i] = SUNRabs(x[i])
*/
void N_VAbs_OpenMPDEV(N_Vector x, N_Vector z)
{
sunindextype i, N;
realtype *xd_dev, *zd_dev;
int dev;
xd_dev = zd_dev = NULL;
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
zd_dev = NV_DATA_DEV_OMPDEV(z);
/* get default device identifier */
dev = omp_get_default_device();
/* offloaded elementwise absolute value: z[i] = |x[i]| */
#pragma omp target is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
for (i = 0; i < N; i++)
zd_dev[i] = SUNRabs(xd_dev[i]);
return;
}
/* ----------------------------------------------------------------------------
* Compute componentwise inverse z[i] = 1 / x[i]
*/
void N_VInv_OpenMPDEV(N_Vector x, N_Vector z)
{
sunindextype i, N;
realtype *xd_dev, *zd_dev;
int dev;
xd_dev = zd_dev = NULL;
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
zd_dev = NV_DATA_DEV_OMPDEV(z);
/* get default device identifier */
dev = omp_get_default_device();
/* offloaded elementwise reciprocal: z[i] = 1/x[i].
   Unchecked; N_VInvTest is the variant that flags zero entries. */
#pragma omp target is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
for (i = 0; i < N; i++)
zd_dev[i] = ONE/xd_dev[i];
return;
}
/* ----------------------------------------------------------------------------
* Compute componentwise addition of a scaler to a vector z[i] = x[i] + b
*/
void N_VAddConst_OpenMPDEV(N_Vector x, realtype b, N_Vector z)
{
sunindextype i, N;
realtype *xd_dev, *zd_dev;
int dev;
xd_dev = zd_dev = NULL;
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
zd_dev = NV_DATA_DEV_OMPDEV(z);
/* get default device identifier */
dev = omp_get_default_device();
/* offloaded scalar shift: z[i] = x[i] + b */
#pragma omp target is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
for (i = 0; i < N; i++)
zd_dev[i] = xd_dev[i]+b;
return;
}
/* ----------------------------------------------------------------------------
* Computes the dot product of two vectors, a = sum(x[i]*y[i])
*/
realtype N_VDotProd_OpenMPDEV(N_Vector x, N_Vector y)
{
sunindextype i, N;
realtype sum, *xd_dev, *yd_dev;
int dev;
xd_dev = yd_dev = NULL;
sum = ZERO;
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
yd_dev = NV_DATA_DEV_OMPDEV(y);
/* get default device identifier */
dev = omp_get_default_device();
/* offloaded reduction; sum is mapped tofrom so the device result is
   copied back to the host scalar */
#pragma omp target map(tofrom:sum) is_device_ptr(xd_dev, yd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(+:sum) schedule(static, 1)
for (i = 0; i < N; i++) {
sum += xd_dev[i]*yd_dev[i];
}
return(sum);
}
/* ----------------------------------------------------------------------------
* Computes max norm of a vector
*/
realtype N_VMaxNorm_OpenMPDEV(N_Vector x)
{
sunindextype i, N;
realtype max, *xd_dev;
int dev;
/* ZERO is a valid identity here because |x[i]| >= 0 */
max = ZERO;
xd_dev = NULL;
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
/* get default device identifier */
dev = omp_get_default_device();
/* offloaded max-reduction over absolute values */
#pragma omp target map(tofrom:max) is_device_ptr(xd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(max:max) schedule(static, 1)
for (i = 0; i < N; i++) {
max = SUNMAX(SUNRabs(xd_dev[i]), max);
}
return(max);
}
/* ----------------------------------------------------------------------------
* Computes weighted root mean square norm of a vector
*/
realtype N_VWrmsNorm_OpenMPDEV(N_Vector x, N_Vector w)
{
  /* sqrt of the weighted square sum averaged over the vector length */
  realtype sqr_sum = N_VWSqrSumLocal_OpenMPDEV(x, w);
  return(SUNRsqrt(sqr_sum/(NV_LENGTH_OMPDEV(x))));
}
/* ----------------------------------------------------------------------------
* Computes weighted root mean square norm of a masked vector
*/
realtype N_VWrmsNormMask_OpenMPDEV(N_Vector x, N_Vector w, N_Vector id)
{
  /* sqrt of the masked weighted square sum averaged over the vector length */
  realtype sqr_sum = N_VWSqrSumMaskLocal_OpenMPDEV(x, w, id);
  return(SUNRsqrt(sqr_sum / (NV_LENGTH_OMPDEV(x))));
}
/* ----------------------------------------------------------------------------
* Computes weighted square sum of a vector
*/
realtype N_VWSqrSumLocal_OpenMPDEV(N_Vector x, N_Vector w)
{
sunindextype i, N;
realtype sum, *xd_dev, *wd_dev;
int dev;
sum = ZERO;
xd_dev = wd_dev = NULL;
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
wd_dev = NV_DATA_DEV_OMPDEV(w);
/* get default device identifier */
dev = omp_get_default_device();
/* offloaded reduction: sum of (x[i]*w[i])^2 */
#pragma omp target map(tofrom:sum) is_device_ptr(xd_dev, wd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(+:sum) schedule(static, 1)
for (i = 0; i < N; i++) {
sum += SUNSQR(xd_dev[i]*wd_dev[i]);
}
return(sum);
}
/* ----------------------------------------------------------------------------
* Computes weighted square sum of a masked vector
*/
realtype N_VWSqrSumMaskLocal_OpenMPDEV(N_Vector x, N_Vector w, N_Vector id)
{
sunindextype i, N;
realtype sum, *xd_dev, *wd_dev, *idd_dev;
int dev;
sum = ZERO;
xd_dev = wd_dev = idd_dev = NULL;
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
wd_dev = NV_DATA_DEV_OMPDEV(w);
idd_dev = NV_DATA_DEV_OMPDEV(id);
/* get default device identifier */
dev = omp_get_default_device();
/* offloaded reduction: sum of (x[i]*w[i])^2 over entries whose mask
   value id[i] is positive; all other entries are skipped */
#pragma omp target map(tofrom:sum) is_device_ptr(xd_dev, wd_dev, idd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(+:sum) schedule(static, 1)
for (i = 0; i < N; i++) {
if (idd_dev[i] > ZERO) {
sum += SUNSQR(xd_dev[i]*wd_dev[i]);
}
}
return(sum);
}
/* ----------------------------------------------------------------------------
* Finds the minimun component of a vector
*/
realtype N_VMin_OpenMPDEV(N_Vector x)
{
sunindextype i, N;
realtype min, *xd_dev;
int dev;
xd_dev = NULL;
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
/* get default device identifier */
dev = omp_get_default_device();
/* single team so min can be seeded from xd_dev[0] on the device before the
   reduction over the remaining elements; result mapped back via map(from) */
#pragma omp target map(from:min) is_device_ptr(xd_dev) device(dev)
#pragma omp teams num_teams(1)
{
min = xd_dev[0];
#pragma omp distribute parallel for reduction(min:min) schedule(static, 1)
for (i = 1; i < N; i++) {
min = SUNMIN(xd_dev[i], min);
}
}
return(min);
}
/* ----------------------------------------------------------------------------
* Computes weighted L2 norm of a vector
*/
realtype N_VWL2Norm_OpenMPDEV(N_Vector x, N_Vector w)
{
sunindextype i, N;
realtype sum, *xd_dev, *wd_dev;
int dev;
sum = ZERO;
xd_dev = wd_dev = NULL;
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
wd_dev = NV_DATA_DEV_OMPDEV(w);
/* get default device identifier */
dev = omp_get_default_device();
/* offloaded reduction: same kernel as N_VWSqrSumLocal, but the final
   result is the square root (no division by N) */
#pragma omp target map(tofrom:sum) is_device_ptr(xd_dev, wd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(+:sum) schedule(static, 1)
for (i = 0; i < N; i++) {
sum += SUNSQR(xd_dev[i]*wd_dev[i]);
}
return(SUNRsqrt(sum));
}
/* ----------------------------------------------------------------------------
* Computes L1 norm of a vector
*/
realtype N_VL1Norm_OpenMPDEV(N_Vector x)
{
sunindextype i, N;
realtype sum, *xd_dev;
int dev;
sum = ZERO;
xd_dev = NULL;
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
/* get default device identifier */
dev = omp_get_default_device();
/* offloaded reduction: sum of |x[i]| */
#pragma omp target map(tofrom:sum) is_device_ptr(xd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(+:sum) schedule(static, 1)
for (i = 0; i<N; i++)
sum += SUNRabs(xd_dev[i]);
return(sum);
}
/* ----------------------------------------------------------------------------
* Compare vector component values to a scaler
*/
void N_VCompare_OpenMPDEV(realtype c, N_Vector x, N_Vector z)
{
sunindextype i, N;
realtype *xd_dev, *zd_dev;
int dev;
xd_dev = zd_dev = NULL;
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
zd_dev = NV_DATA_DEV_OMPDEV(z);
/* get default device identifier */
dev = omp_get_default_device();
/* offloaded threshold test: z[i] = 1 if |x[i]| >= c, else 0 */
#pragma omp target is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
for (i = 0; i < N; i++)
zd_dev[i] = (SUNRabs(xd_dev[i]) >= c) ? ONE : ZERO;
return;
}
/* ----------------------------------------------------------------------------
* Compute componentwise inverse z[i] = ONE/x[i] and checks if x[i] == ZERO
*/
booleantype N_VInvTest_OpenMPDEV(N_Vector x, N_Vector z)
{
sunindextype i, N;
realtype *xd_dev, *zd_dev, val;
int dev;
xd_dev = zd_dev = NULL;
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
zd_dev = NV_DATA_DEV_OMPDEV(z);
/* get default device identifier */
dev = omp_get_default_device();
/* val acts as an error flag: it stays ZERO unless some x[i] == 0.
   A max-reduction is used so any iteration can raise the flag to ONE. */
val = ZERO;
#pragma omp target map(tofrom:val) is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(max:val) schedule(static, 1)
for (i = 0; i < N; i++) {
if (xd_dev[i] == ZERO)
val = ONE;
else
zd_dev[i] = ONE/xd_dev[i];
}
/* SUNFALSE signals at least one zero entry was encountered; note that
   z entries corresponding to zero x entries are left unwritten */
if (val > ZERO)
return (SUNFALSE);
else
return (SUNTRUE);
}
/* ----------------------------------------------------------------------------
* Compute constraint mask of a vector
*/
booleantype N_VConstrMask_OpenMPDEV(N_Vector c, N_Vector x, N_Vector m)
{
sunindextype i, N;
realtype temp;
realtype *cd_dev, *xd_dev, *md_dev;
int dev;
cd_dev = xd_dev = md_dev = NULL;
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
cd_dev = NV_DATA_DEV_OMPDEV(c);
md_dev = NV_DATA_DEV_OMPDEV(m);
/* get default device identifier */
dev = omp_get_default_device();
/* temp is a pass/fail flag: min-reduction drops it to ZERO if any
   constraint is violated; the mask m marks the violating entries */
temp = ONE;
#pragma omp target map(tofrom:temp) is_device_ptr(xd_dev, cd_dev, md_dev) device(dev)
#pragma omp teams distribute parallel for reduction(min:temp) schedule(static, 1)
for (i = 0; i < N; i++) {
md_dev[i] = ZERO;
/* c[i] == 0: no constraint on this component */
if (cd_dev[i] == ZERO) continue;
/* |c[i]| == 2: strict sign constraint (x[i]*c[i] must be > 0) */
if (cd_dev[i] > ONEPT5 || cd_dev[i] < -ONEPT5) {
if ( xd_dev[i]*cd_dev[i] <= ZERO) { temp = ZERO; md_dev[i] = ONE; }
continue;
}
/* |c[i]| == 1: non-strict sign constraint (x[i]*c[i] must be >= 0) */
if ( cd_dev[i] > HALF || cd_dev[i] < -HALF) {
if (xd_dev[i]*cd_dev[i] < ZERO ) { temp = ZERO; md_dev[i] = ONE; }
}
}
if (temp == ONE) return (SUNTRUE);
else return(SUNFALSE);
}
/* ----------------------------------------------------------------------------
* Compute minimum componentwise quotient
*/
realtype N_VMinQuotient_OpenMPDEV(N_Vector num, N_Vector denom)
{
sunindextype i, N;
realtype *nd_dev, *dd_dev, min;
int dev;
nd_dev = dd_dev = NULL;
N = NV_LENGTH_OMPDEV(num);
nd_dev = NV_DATA_DEV_OMPDEV(num);
dd_dev = NV_DATA_DEV_OMPDEV(denom);
/* get default device identifier */
dev = omp_get_default_device();
/* BIG_REAL is returned unchanged if every denominator entry is zero */
min = BIG_REAL;
#pragma omp target map(tofrom:min) is_device_ptr(nd_dev, dd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(min:min) schedule(static, 1)
for (i = 0; i < N; i++)
if (dd_dev[i] != ZERO) min = SUNMIN(nd_dev[i]/dd_dev[i], min);
return(min);
}
/*
* -----------------------------------------------------------------
* fused vector operations
* -----------------------------------------------------------------
*/
/* Computes z = sum_{i=0..nvec-1} c[i]*X[i] on the device.
 * Returns 0 on success, -1 if nvec < 1.  nvec == 1 and nvec == 2 are
 * delegated to N_VScale / N_VLinearSum; aliasing of z with X[0] is handled
 * by dedicated in-place branches. */
int N_VLinearCombination_OpenMPDEV(int nvec, realtype* c, N_Vector* X, N_Vector z)
{
  int i, dev;
  realtype to_add; /* temporary variable to hold sum being added in atomic operation */
  sunindextype j, N;
  realtype* zd_dev=NULL;
  realtype* xd_dev=NULL;
  realtype** xd_dev_ptrs=NULL;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VScale */
  if (nvec == 1) {
    N_VScale_OpenMPDEV(c[0], X[0], z);
    return(0);
  }

  /* should have called N_VLinearSum */
  if (nvec == 2) {
    N_VLinearSum_OpenMPDEV(c[0], X[0], c[1], X[1], z);
    return(0);
  }

  /* get vector length and data array */
  N = NV_LENGTH_OMPDEV(z);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store X dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);

  /*
   * X[0] += c[i]*X[i], i = 1,...,nvec-1
   */
  if ((X[0] == z) && (c[0] == ONE)) {
    #pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec]) \
      is_device_ptr(xd_dev,zd_dev) device(dev)
    #pragma omp teams distribute
    {
      for (i=1; i<nvec; i++) {
        xd_dev = xd_dev_ptrs[i];
        #pragma omp parallel for schedule(static, 1)
        for (j=0; j<N; j++) {
          to_add = c[i] * xd_dev[j];
          /* atomic: several i-iterations may update the same z entry */
          #pragma omp atomic
          zd_dev[j] += to_add;
        }
      }
    }
    free(xd_dev_ptrs);
    return(0);
  }

  /*
   * X[0] = c[0] * X[0] + sum{ c[i] * X[i] }, i = 1,...,nvec-1
   *
   * device(dev) added to both target regions below for consistency with
   * every other target region in this function (they previously relied on
   * the implicit default device, which is the same device).
   */
  if (X[0] == z) {
    /* scale z in place first */
    #pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec]) \
      is_device_ptr(xd_dev,zd_dev) device(dev)
    {
      #pragma omp teams distribute parallel for schedule(static,1)
      for (j=0; j<N; j++)
        zd_dev[j] *= c[0];
    }
    /* then accumulate the remaining scaled vectors */
    #pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec]) \
      is_device_ptr(xd_dev,zd_dev) device(dev)
    #pragma omp teams distribute
    {
      for (i=1; i<nvec; i++) {
        xd_dev = xd_dev_ptrs[i];
        #pragma omp parallel for schedule(static, 1)
        for (j=0; j<N; j++) {
          to_add = c[i] * xd_dev[j];
          #pragma omp atomic
          zd_dev[j] += to_add;
        }
      }
    }
    free(xd_dev_ptrs);
    return(0);
  }

  /*
   * z = sum{ c[i] * X[i] }, i = 0,...,nvec-1
   */
  xd_dev = NV_DATA_DEV_OMPDEV(X[0]);
  /* initialize z with the first term */
  #pragma omp target map(to:N,c[:nvec]) \
    is_device_ptr(xd_dev, zd_dev) device(dev)
  {
    #pragma omp teams distribute parallel for schedule(static, 1)
    for (j=0; j<N; j++) {
      zd_dev[j] = c[0] * xd_dev[j];
    }
  }
  /* accumulate the remaining terms */
  #pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec]) \
    is_device_ptr(xd_dev, zd_dev) device(dev)
  #pragma omp teams distribute
  {
    for (i=1; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
      #pragma omp parallel for schedule(static, 1)
      for (j=0; j<N; j++) {
        to_add = c[i] * xd_dev[j];
        #pragma omp atomic
        zd_dev[j] += to_add;
      }
    }
  }
  free(xd_dev_ptrs);
  return(0);
}
/* Computes Z[i] = a[i]*x + Y[i] for i = 0,...,nvec-1 on the device.
 * When Y and Z are the same array the update is done in place.
 * Returns 0 on success, -1 if nvec < 1. */
int N_VScaleAddMulti_OpenMPDEV(int nvec, realtype* a, N_Vector x, N_Vector* Y, N_Vector* Z)
{
int i, dev;
sunindextype j, N;
realtype* xd_dev=NULL;
realtype* yd_dev=NULL;
realtype* zd_dev=NULL;
realtype** yd_dev_ptrs=NULL;
realtype** zd_dev_ptrs=NULL;
/* invalid number of vectors */
if (nvec < 1) return(-1);
/* should have called N_VLinearSum */
if (nvec == 1) {
N_VLinearSum_OpenMPDEV(a[0], x, ONE, Y[0], Z[0]);
return(0);
}
/* get vector length and data array */
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
/* get default device identifier */
dev = omp_get_default_device();
/* Allocate and store dev pointers to copy to device */
yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
for (i=0; i<nvec; i++)
yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
/*
 * Y[i][j] += a[i] * x[j]
 */
if (Y == Z) {
#pragma omp target map(to:N,nvec,a[:nvec],yd_dev_ptrs[:nvec]) \
is_device_ptr(xd_dev, yd_dev) device(dev)
#pragma omp teams distribute
{
for (i=0; i<nvec; i++) {
yd_dev = yd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
for (j=0; j<N; j++)
yd_dev[j] += a[i] * xd_dev[j];
}
}
free(yd_dev_ptrs);
return(0);
}
/* Allocate and store dev pointers to copy to device */
zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
for (i=0; i<nvec; i++)
zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);
/*
 * Z[i][j] = Y[i][j] + a[i] * x[j]
 */
#pragma omp target map(to:N,nvec,a[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute
{
for (i=0; i<nvec; i++) {
yd_dev = yd_dev_ptrs[i];
zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
for (j=0; j<N; j++)
zd_dev[j] = a[i] * xd_dev[j] + yd_dev[j];
}
}
free(yd_dev_ptrs);
free(zd_dev_ptrs);
return(0);
}
/* Computes dotprods[i] = <x, Y[i]> for i = 0,...,nvec-1 on the device.
 * Returns 0 on success, -1 if nvec < 1. */
int N_VDotProdMulti_OpenMPDEV(int nvec, N_Vector x, N_Vector* Y, realtype* dotprods)
{
int i, dev;
sunindextype j, N;
realtype sum;
realtype* xd_dev=NULL;
realtype* yd_dev=NULL;
realtype** yd_dev_ptrs=NULL;
/* invalid number of vectors */
if (nvec < 1) return(-1);
/* should have called N_VDotProd */
if (nvec == 1) {
dotprods[0] = N_VDotProd_OpenMPDEV(x, Y[0]);
return(0);
}
/* get vector length and data array */
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
/* get default device identifier */
dev = omp_get_default_device();
/* initialize dot products */
for (i=0; i<nvec; i++) {
dotprods[i] = ZERO;
}
/* Allocate and store dev pointers to copy to device */
yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
for (i=0; i<nvec; i++)
yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
/* compute multiple dot products; each team handles one Y vector and
   reduces its partial sum into the mapped dotprods entry */
#pragma omp target map(to:N,nvec,yd_dev_ptrs[:nvec]) map(tofrom:dotprods[:nvec]) \
is_device_ptr(xd_dev,yd_dev) device(dev)
#pragma omp teams distribute
for (i=0; i<nvec; i++) {
yd_dev = yd_dev_ptrs[i];
sum = ZERO;
#pragma omp parallel for reduction(+:sum) schedule(static, 1)
for (j=0; j<N; j++)
sum += xd_dev[j] * yd_dev[j];
dotprods[i] += sum;
}
free(yd_dev_ptrs);
return(0);
}
/*
* -----------------------------------------------------------------
* vector array operations
* -----------------------------------------------------------------
*/
/* Computes Z[i] = a*X[i] + b*Y[i] for i = 0,...,nvec-1 on the device.
 * Mirrors the special-case dispatch of N_VLinearSum for common values of
 * a and b and for aliased arrays.  Returns 0 on success, -1 if nvec < 1. */
int N_VLinearSumVectorArray_OpenMPDEV(int nvec,
realtype a, N_Vector* X,
realtype b, N_Vector* Y,
N_Vector* Z)
{
int i, dev;
sunindextype j, N;
N_Vector* V1;
N_Vector* V2;
booleantype test;
realtype c;
realtype* xd_dev=NULL;
realtype* yd_dev=NULL;
realtype* zd_dev=NULL;
realtype** xd_dev_ptrs=NULL;
realtype** yd_dev_ptrs=NULL;
realtype** zd_dev_ptrs=NULL;
/* invalid number of vectors */
if (nvec < 1) return(-1);
/* should have called N_VLinearSum */
if (nvec == 1) {
N_VLinearSum_OpenMPDEV(a, X[0], b, Y[0], Z[0]);
return(0);
}
/* BLAS usage: axpy y <- ax+y */
if ((b == ONE) && (Z == Y))
return(VaxpyVectorArray_OpenMPDEV(nvec, a, X, Y));
/* BLAS usage: axpy x <- by+x */
if ((a == ONE) && (Z == X))
return(VaxpyVectorArray_OpenMPDEV(nvec, b, Y, X));
/* Case: a == b == 1.0 */
if ((a == ONE) && (b == ONE))
return(VSumVectorArray_OpenMPDEV(nvec, X, Y, Z));
/* Cases: */
/* (1) a == 1.0, b = -1.0, */
/* (2) a == -1.0, b == 1.0 */
if ((test = ((a == ONE) && (b == -ONE))) || ((a == -ONE) && (b == ONE))) {
V1 = test ? Y : X;
V2 = test ? X : Y;
return(VDiffVectorArray_OpenMPDEV(nvec, V2, V1, Z));
}
/* Cases: */
/* (1) a == 1.0, b == other or 0.0, */
/* (2) a == other or 0.0, b == 1.0 */
/* if a or b is 0.0, then user should have called N_VScale */
if ((test = (a == ONE)) || (b == ONE)) {
c = test ? b : a;
V1 = test ? Y : X;
V2 = test ? X : Y;
return(VLin1VectorArray_OpenMPDEV(nvec, c, V1, V2, Z));
}
/* Cases: */
/* (1) a == -1.0, b != 1.0, */
/* (2) a != 1.0, b == -1.0 */
if ((test = (a == -ONE)) || (b == -ONE)) {
c = test ? b : a;
V1 = test ? Y : X;
V2 = test ? X : Y;
return(VLin2VectorArray_OpenMPDEV(nvec, c, V1, V2, Z));
}
/* Case: a == b */
/* catches case both a and b are 0.0 - user should have called N_VConst */
if (a == b)
return(VScaleSumVectorArray_OpenMPDEV(nvec, a, X, Y, Z));
/* Case: a == -b */
if (a == -b)
return(VScaleDiffVectorArray_OpenMPDEV(nvec, a, X, Y, Z));
/* Do all cases not handled above: */
/* (1) a == other, b == 0.0 - user should have called N_VScale */
/* (2) a == 0.0, b == other - user should have called N_VScale */
/* (3) a,b == other, a !=b, a != -b */
/* get vector length */
N = NV_LENGTH_OMPDEV(Z[0]);
/* get default device identifier */
dev = omp_get_default_device()
;
/* Allocate and store dev pointers to copy to device */
xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
for (i=0; i<nvec; i++)
xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
for (i=0; i<nvec; i++)
yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
for (i=0; i<nvec; i++)
zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);
/* compute linear sum for each vector pair in vector arrays */
#pragma omp target map(to:N,nvec,a,b,xd_dev_ptrs[:nvec], yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute
{
for (i=0; i<nvec; i++) {
xd_dev = xd_dev_ptrs[i];
yd_dev = yd_dev_ptrs[i];
zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
for (j=0; j<N; j++)
zd_dev[j] = a * xd_dev[j] + b * yd_dev[j];
}
}
free(xd_dev_ptrs);
free(yd_dev_ptrs);
free(zd_dev_ptrs);
return(0);
}
/* Computes Z[i] = c[i]*X[i] for i = 0,...,nvec-1 on the device.
 * When X and Z are the same array the scaling is done in place.
 * Returns 0 on success, -1 if nvec < 1. */
int N_VScaleVectorArray_OpenMPDEV(int nvec, realtype* c, N_Vector* X, N_Vector* Z)
{
int i, dev;
sunindextype j, N;
realtype* xd_dev=NULL;
realtype* zd_dev=NULL;
realtype** xd_dev_ptrs=NULL;
realtype** zd_dev_ptrs=NULL;
/* invalid number of vectors */
if (nvec < 1) return(-1);
/* should have called N_VScale */
if (nvec == 1) {
N_VScale_OpenMPDEV(c[0], X[0], Z[0]);
return(0);
}
/* get vector length */
N = NV_LENGTH_OMPDEV(Z[0]);
/* get default device identifier */
dev = omp_get_default_device();
/* Allocate and store dev pointers to copy to device */
xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
for (i=0; i<nvec; i++) {
xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
}
/*
 * X[i] *= c[i]
 */
if (X == Z) {
#pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec]) \
is_device_ptr(xd_dev) device(dev)
#pragma omp teams distribute
{
for (i=0; i<nvec; i++) {
xd_dev = xd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
for (j=0; j<N; j++)
xd_dev[j] *= c[i];
}
}
free(xd_dev_ptrs);
return(0);
}
/* Allocate and store dev pointers to copy to device */
zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
for (i=0; i<nvec; i++)
zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);
/*
 * Z[i] = c[i] * X[i]
 */
#pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute
{
for (i=0; i<nvec; i++) {
xd_dev = xd_dev_ptrs[i];
zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
for (j=0; j<N; j++)
zd_dev[j] = c[i] * xd_dev[j];
}
}
free(xd_dev_ptrs);
free(zd_dev_ptrs);
return(0);
}
/* Sets every element of each Z[i], i = 0,...,nvec-1, to c on the device.
 * Returns 0 on success, -1 if nvec < 1. */
int N_VConstVectorArray_OpenMPDEV(int nvec, realtype c, N_Vector* Z)
{
int i, dev;
sunindextype j, N;
realtype* zd_dev=NULL;
realtype** zd_dev_ptrs=NULL;
/* invalid number of vectors */
if (nvec < 1) return(-1);
/* should have called N_VConst */
if (nvec == 1) {
N_VConst_OpenMPDEV(c, Z[0]);
return(0);
}
/* get vector length */
N = NV_LENGTH_OMPDEV(Z[0]);
/* get device */
dev = omp_get_default_device();
/* Allocate and store dev pointers to copy to device */
zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
for (i=0; i<nvec; i++)
zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);
/* set each vector in the vector array to a constant */
#pragma omp target map(to:N,nvec,zd_dev_ptrs[:nvec]) \
is_device_ptr(zd_dev) device(dev)
#pragma omp teams distribute
{
for (i=0; i<nvec; i++) {
zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
for (j=0; j<N; j++)
zd_dev[j] = c;
}
}
free(zd_dev_ptrs);
return(0);
}
/* Computes nrm[i] = WRMS norm of X[i] with weights W[i] for each vector in
 * the array, on the device.  Returns 0 on success, -1 if nvec < 1. */
int N_VWrmsNormVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* W, realtype* nrm)
{
  int i, dev;
  sunindextype j, N;
  realtype sum;
  realtype* wd_dev=NULL;
  realtype* xd_dev=NULL;
  realtype** wd_dev_ptrs=NULL;
  realtype** xd_dev_ptrs=NULL;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VWrmsNorm */
  if (nvec == 1) {
    nrm[0] = N_VWrmsNorm_OpenMPDEV(X[0], W[0]);
    return(0);
  }

  /* get vector length */
  N = NV_LENGTH_OMPDEV(X[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* initialize norms */
  for (i=0; i<nvec; i++)
    nrm[i] = ZERO;

  /* Allocate and store dev pointers to copy to device */
  wd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    wd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(W[i]);
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);

  /* compute the WRMS norm for each vector in the vector array */
  #pragma omp target map(to:N,nvec,xd_dev_ptrs[:nvec],wd_dev_ptrs[:nvec]) map(tofrom:nrm[:nvec]) \
    is_device_ptr(xd_dev, wd_dev) device(dev)
  #pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
      wd_dev = wd_dev_ptrs[i];
      sum = ZERO;
      /* the 'parallel for' construct must be immediately followed by the
         for loop itself (the previous compound-statement form was not
         conforming OpenMP) */
      #pragma omp parallel for reduction(+:sum) schedule(static, 1)
      for (j=0; j<N; j++)
        sum += SUNSQR(xd_dev[j] * wd_dev[j]);
      nrm[i] = SUNRsqrt(sum/N);
    }
  }

  free(wd_dev_ptrs);
  free(xd_dev_ptrs);
  return(0);
}
/* Computes nrm[i] = masked WRMS norm of X[i] with weights W[i] for each
 * vector in the array, on the device.  Only entries whose mask value id[j]
 * is positive contribute to the sum.  Returns 0 on success, -1 if nvec < 1. */
int N_VWrmsNormMaskVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* W,
N_Vector id, realtype* nrm)
{
  int i, dev;
  sunindextype j, N;
  realtype sum;
  realtype* wd_dev=NULL;
  realtype* xd_dev=NULL;
  realtype* idd_dev=NULL;
  realtype** wd_dev_ptrs=NULL;
  realtype** xd_dev_ptrs=NULL;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VWrmsNorm */
  if (nvec == 1) {
    nrm[0] = N_VWrmsNormMask_OpenMPDEV(X[0], W[0], id);
    return(0);
  }

  /* get vector length and mask data array */
  N = NV_LENGTH_OMPDEV(X[0]);
  idd_dev = NV_DATA_DEV_OMPDEV(id);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* initialize norms */
  for (i=0; i<nvec; i++)
    nrm[i] = ZERO;

  /* Allocate and store dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  wd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++)
    wd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(W[i]);

  /* compute the WRMS norm for each vector in the vector array */
  #pragma omp target map(to:N,nvec,xd_dev_ptrs[:nvec],wd_dev_ptrs[:nvec]) map(tofrom:nrm[:nvec]) \
    is_device_ptr(idd_dev,xd_dev,wd_dev) device(dev)
  #pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
      wd_dev = wd_dev_ptrs[i];
      sum = ZERO;
      /* the 'parallel for' construct must be immediately followed by the
         for loop itself (the previous compound-statement form was not
         conforming OpenMP) */
      #pragma omp parallel for reduction(+:sum) schedule(static, 1)
      for (j=0; j<N; j++) {
        if (idd_dev[j] > ZERO)
          sum += SUNSQR(xd_dev[j] * wd_dev[j]);
      }
      nrm[i] = SUNRsqrt(sum/N);
    }
  }

  free(xd_dev_ptrs);
  free(wd_dev_ptrs);
  return(0);
}
/* ----------------------------------------------------------------------------
 * For each vector i in [0,nvec) and each sum index j in [0,nsum) computes
 *   Z[j][i] = Y[j][i] + a[j] * X[i]
 * with an in-place variant when Y == Z.  Small cases are delegated to the
 * cheaper fused/standard operations.  Returns 0 on success, -1 if nvec < 1
 * or nsum < 1, otherwise the return of the delegated operation.
 */
int N_VScaleAddMultiVectorArray_OpenMPDEV(int nvec, int nsum, realtype* a,
                                          N_Vector* X, N_Vector** Y, N_Vector** Z)
{
  int i, j, dev;
  sunindextype k, N;
  realtype* xd_dev=NULL;
  realtype* yd_dev=NULL;
  realtype* zd_dev=NULL;
  realtype** xd_dev_ptrs=NULL;
  realtype** yd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;
  int retval;
  N_Vector* YY;
  N_Vector* ZZ;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);
  if (nsum < 1) return(-1);

  /* ---------------------------
   * Special cases for nvec == 1
   * --------------------------- */

  if (nvec == 1) {

    /* should have called N_VLinearSum */
    if (nsum == 1) {
      N_VLinearSum_OpenMPDEV(a[0], X[0], ONE, Y[0][0], Z[0][0]);
      return(0);
    }

    /* should have called N_VScaleAddMulti; flatten the first column of
       Y and Z into temporary arrays for the fused operation */
    YY = (N_Vector *) malloc(nsum * sizeof(N_Vector));
    ZZ = (N_Vector *) malloc(nsum * sizeof(N_Vector));
    for (j=0; j<nsum; j++) {
      YY[j] = Y[j][0];
      ZZ[j] = Z[j][0];
    }
    retval = N_VScaleAddMulti_OpenMPDEV(nsum, a, X[0], YY, ZZ);
    free(YY);
    free(ZZ);
    return(retval);
  }

  /* --------------------------
   * Special cases for nvec > 1
   * -------------------------- */

  /* should have called N_VLinearSumVectorArray */
  if (nsum == 1) {
    retval = N_VLinearSumVectorArray_OpenMPDEV(nvec, a[0], X, ONE, Y[0], Z[0]);
    return(retval);
  }

  /* ----------------------------
   * Compute multiple linear sums
   * ---------------------------- */

  /* get vector length (all vectors assumed same length) */
  N = NV_LENGTH_OMPDEV(X[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device; row-major layout,
     entry [i*nsum + j] holds the device data pointer of Y[j][i] */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  yd_dev_ptrs = (realtype**) malloc(nvec * nsum * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++) {
    for (j=0; j<nsum; j++)
      yd_dev_ptrs[i * nsum + j] = NV_DATA_DEV_OMPDEV(Y[j][i]);
  }

  /*
   * In-place update: Y[j][i] += a[j] * X[i]
   */
  if (Y == Z) {
#pragma omp target map(to:N,nvec,nsum,a[:nsum],xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec*nsum]) \
  is_device_ptr(xd_dev, yd_dev) device(dev)
#pragma omp teams distribute
    {
      for (i=0; i<nvec; i++) {
        xd_dev = xd_dev_ptrs[i];
        for (j=0; j<nsum; j++) {
          yd_dev = yd_dev_ptrs[i*nsum+j];
#pragma omp parallel for schedule(static, 1)
          for (k=0; k<N; k++)
            yd_dev[k] += a[j] * xd_dev[k];
        }
      }
    }

    free(xd_dev_ptrs);
    free(yd_dev_ptrs);
    return(0);
  }

  /* Allocate and store dev pointers to copy to device */
  zd_dev_ptrs = (realtype**) malloc(nvec * nsum * sizeof(realtype*));
  for (i=0; i<nvec; i++) {
    for (j=0; j<nsum; j++)
      zd_dev_ptrs[i * nsum + j] = NV_DATA_DEV_OMPDEV(Z[j][i]);
  }

  /*
   * Out-of-place: Z[j][i] = Y[j][i] + a[j] * X[i]
   */
#pragma omp target map(to:N,nvec,nsum,a[:nsum],xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec*nsum],zd_dev_ptrs[:nvec*nsum]) \
  is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
      for (j=0; j<nsum; j++) {
        yd_dev = yd_dev_ptrs[i*nsum+j];
        zd_dev = zd_dev_ptrs[i*nsum+j];
#pragma omp parallel for schedule(static, 1)
        for (k=0; k<N; k++)
          zd_dev[k] = a[j] * xd_dev[k] + yd_dev[k];
      }
    }
  }

  free(xd_dev_ptrs);
  free(yd_dev_ptrs);
  free(zd_dev_ptrs);

  return(0);
}
/* ----------------------------------------------------------------------------
 * For each vector j in [0,nvec) computes the linear combination
 *   Z[j] = sum{ c[i] * X[i][j] }, i = 0,...,nsum-1
 * Specializes the kernel when the output aliases the first operand
 * (X[0] == Z).  Small cases are delegated to the cheaper fused/standard
 * operations.  Returns 0 on success, -1 if nvec < 1 or nsum < 1.
 */
int N_VLinearCombinationVectorArray_OpenMPDEV(int nvec, int nsum,
                                              realtype* c,
                                              N_Vector** X,
                                              N_Vector* Z)
{
  int i; /* vector arrays index in summation [0,nsum) */
  int j; /* vector index in vector array [0,nvec) */
  sunindextype k; /* element index in vector [0,N) */
  sunindextype N;
  realtype* zd_dev=NULL;
  realtype* xd_dev=NULL;
  realtype** zd_dev_ptrs=NULL;
  realtype** xd_dev_ptrs=NULL;
  int dev;
  realtype* ctmp;
  N_Vector* Y;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);
  if (nsum < 1) return(-1);

  /* ---------------------------
   * Special cases for nvec == 1
   * --------------------------- */

  if (nvec == 1) {

    /* should have called N_VScale */
    if (nsum == 1) {
      N_VScale_OpenMPDEV(c[0], X[0][0], Z[0]);
      return(0);
    }

    /* should have called N_VLinearSum */
    if (nsum == 2) {
      N_VLinearSum_OpenMPDEV(c[0], X[0][0], c[1], X[1][0], Z[0]);
      return(0);
    }

    /* should have called N_VLinearCombination; flatten the first column
       of X into a temporary array for the fused operation */
    Y = (N_Vector *) malloc(nsum * sizeof(N_Vector));
    for (i=0; i<nsum; i++) {
      Y[i] = X[i][0];
    }
    N_VLinearCombination_OpenMPDEV(nsum, c, Y, Z[0]);
    free(Y);
    return(0);
  }

  /* --------------------------
   * Special cases for nvec > 1
   * -------------------------- */

  /* should have called N_VScaleVectorArray; replicate c[0] per vector */
  if (nsum == 1) {
    ctmp = (realtype*) malloc(nvec * sizeof(realtype));
    for (j=0; j<nvec; j++) {
      ctmp[j] = c[0];
    }
    N_VScaleVectorArray_OpenMPDEV(nvec, ctmp, X[0], Z);
    free(ctmp);
    return(0);
  }

  /* should have called N_VLinearSumVectorArray */
  if (nsum == 2) {
    N_VLinearSumVectorArray_OpenMPDEV(nvec, c[0], X[0], c[1], X[1], Z);
    return(0);
  }

  /* --------------------------
   * Compute linear combination
   * -------------------------- */

  /* get vector length (all vectors assumed same length) */
  N = NV_LENGTH_OMPDEV(Z[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device; row-major layout,
     entry [j*nsum + i] holds the device data pointer of X[i][j] */
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  xd_dev_ptrs = (realtype**) malloc(nvec * nsum * sizeof(realtype*));
  for (j=0; j<nvec; j++)
    zd_dev_ptrs[j] = NV_DATA_DEV_OMPDEV(Z[j]);
  for (j=0; j<nvec; j++) {
    for (i=0; i<nsum; i++)
      xd_dev_ptrs[j * nsum + i] = NV_DATA_DEV_OMPDEV(X[i][j]);
  }

  /*
   * X[0][j] += c[i]*X[i][j], i = 1,...,nsum-1
   * (output aliases first operand and c[0] == 1, so accumulate in place)
   */
  if ((X[0] == Z) && (c[0] == ONE)) {
#pragma omp target map(to:N,nvec,c[:nsum],xd_dev_ptrs[:nvec*nsum],zd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute
    {
      for (j=0; j<nvec; j++) {
        zd_dev = zd_dev_ptrs[j];
        for (i=1; i<nsum; i++) {
          xd_dev = xd_dev_ptrs[j*nsum+i];
#pragma omp parallel for schedule(static, 1)
          for (k=0; k<N; k++)
            zd_dev[k] += c[i] * xd_dev[k];
        }
      }
    }

    free(xd_dev_ptrs);
    free(zd_dev_ptrs);
    return(0);
  }

  /*
   * X[0][j] = c[0] * X[0][j] + sum{ c[i] * X[i][j] }, i = 1,...,nsum-1
   * (output aliases first operand: scale it first, then accumulate)
   */
  if (X[0] == Z) {
#pragma omp target map(to:N,nvec,c[:nsum],xd_dev_ptrs[:nvec*nsum],zd_dev_ptrs[:nvec]) \
  is_device_ptr(zd_dev) device(dev)
#pragma omp teams distribute
    {
      for (j=0; j<nvec; j++) {
        zd_dev = zd_dev_ptrs[j];
#pragma omp parallel for schedule(static, 1)
        for (k=0; k<N; k++)
          zd_dev[k] *= c[0];
        for (i=1; i<nsum; i++) {
          xd_dev = xd_dev_ptrs[j*nsum+i];
#pragma omp parallel for schedule(static, 1)
          for (k=0; k<N; k++)
            zd_dev[k] += c[i] * xd_dev[k];
        }
      }
    }

    free(xd_dev_ptrs);
    free(zd_dev_ptrs);
    return(0);
  }

  /*
   * Z[j] = sum{ c[i] * X[i][j] }, i = 0,...,nsum-1
   */
#pragma omp target map(to:N,nvec,c[:nsum],xd_dev_ptrs[:nvec*nsum],zd_dev_ptrs[:nvec]) \
  is_device_ptr(zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (j=0; j<nvec; j++) {
      /* scale first vector in the sum into the output vector */
      xd_dev = xd_dev_ptrs[j*nsum];
      zd_dev = zd_dev_ptrs[j];
#pragma omp parallel for schedule(static, 1)
      for (k=0; k<N; k++)
        zd_dev[k] = c[0] * xd_dev[k];
      /* scale and sum remaining vectors into the output vector */
      for (i=1; i<nsum; i++) {
        xd_dev = xd_dev_ptrs[j*nsum+i];
#pragma omp parallel for schedule(static, 1)
        for (k=0; k<N; k++)
          zd_dev[k] += c[i] * xd_dev[k];
      }
    }
  }

  free(xd_dev_ptrs);
  free(zd_dev_ptrs);

  return(0);
}
/*
* -----------------------------------------------------------------
* private functions
* -----------------------------------------------------------------
*/
/* ----------------------------------------------------------------------------
 * Device-side element copy: z = x
 */
static void VCopy_OpenMPDEV(N_Vector x, N_Vector z)
{
  sunindextype k, N;
  realtype *xd_dev, *zd_dev;
  int dev;

  /* target device and device-resident data arrays */
  dev    = omp_get_default_device();
  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

#pragma omp target is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (k = 0; k < N; k++)
    zd_dev[k] = xd_dev[k];
}
/* ----------------------------------------------------------------------------
 * Device-side element-wise sum: z = x + y
 */
static void VSum_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype k, N;
  realtype *xd_dev, *yd_dev, *zd_dev;
  int dev;

  /* target device and device-resident data arrays */
  dev    = omp_get_default_device();
  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

#pragma omp target is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (k = 0; k < N; k++)
    zd_dev[k] = xd_dev[k]+yd_dev[k];
}
/* ----------------------------------------------------------------------------
 * Device-side element-wise difference: z = x - y
 */
static void VDiff_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype k, N;
  realtype *xd_dev, *yd_dev, *zd_dev;
  int dev;

  /* target device and device-resident data arrays */
  dev    = omp_get_default_device();
  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

#pragma omp target is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (k = 0; k < N; k++)
    zd_dev[k] = xd_dev[k]-yd_dev[k];
}
/* ----------------------------------------------------------------------------
 * Device-side negation: z = -x
 */
static void VNeg_OpenMPDEV(N_Vector x, N_Vector z)
{
  sunindextype k, N;
  realtype *xd_dev, *zd_dev;
  int dev;

  /* target device and device-resident data arrays */
  dev    = omp_get_default_device();
  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

#pragma omp target is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (k = 0; k < N; k++)
    zd_dev[k] = -xd_dev[k];
}
/* ----------------------------------------------------------------------------
 * Device-side scaled sum: z = c*(x + y)
 */
static void VScaleSum_OpenMPDEV(realtype c, N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype k, N;
  realtype *xd_dev, *yd_dev, *zd_dev;
  int dev;

  /* target device and device-resident data arrays */
  dev    = omp_get_default_device();
  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

#pragma omp target is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (k = 0; k < N; k++)
    zd_dev[k] = c*(xd_dev[k]+yd_dev[k]);
}
/* ----------------------------------------------------------------------------
 * Device-side scaled difference: z = c*(x - y)
 */
static void VScaleDiff_OpenMPDEV(realtype c, N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype k, N;
  realtype *xd_dev, *yd_dev, *zd_dev;
  int dev;

  /* target device and device-resident data arrays */
  dev    = omp_get_default_device();
  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

#pragma omp target is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (k = 0; k < N; k++)
    zd_dev[k] = c*(xd_dev[k]-yd_dev[k]);
}
/* ----------------------------------------------------------------------------
 * Device-side linear operation: z = a*x + y
 */
static void VLin1_OpenMPDEV(realtype a, N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype k, N;
  realtype *xd_dev, *yd_dev, *zd_dev;
  int dev;

  /* target device and device-resident data arrays */
  dev    = omp_get_default_device();
  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

#pragma omp target is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (k = 0; k < N; k++)
    zd_dev[k] = (a*xd_dev[k])+yd_dev[k];
}
/* ----------------------------------------------------------------------------
 * Device-side linear operation: z = a*x - y
 */
static void VLin2_OpenMPDEV(realtype a, N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype k, N;
  realtype *xd_dev, *yd_dev, *zd_dev;
  int dev;

  /* target device and device-resident data arrays */
  dev    = omp_get_default_device();
  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

#pragma omp target is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (k = 0; k < N; k++)
    zd_dev[k] = (a*xd_dev[k])-yd_dev[k];
}
/* ----------------------------------------------------------------------------
 * Device-side axpy update: y += a*x, with specialized kernels that avoid
 * the multiply when a is exactly +1 or -1.
 */
static void Vaxpy_OpenMPDEV(realtype a, N_Vector x, N_Vector y)
{
  sunindextype k, N;
  realtype *xd_dev, *yd_dev;
  int dev;

  /* target device and device-resident data arrays */
  dev    = omp_get_default_device();
  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);

  if (a == ONE) {
    /* y += x */
#pragma omp target is_device_ptr(xd_dev, yd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
    for (k = 0; k < N; k++)
      yd_dev[k] += xd_dev[k];
  } else if (a == -ONE) {
    /* y -= x */
#pragma omp target is_device_ptr(xd_dev, yd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
    for (k = 0; k < N; k++)
      yd_dev[k] -= xd_dev[k];
  } else {
    /* general coefficient */
#pragma omp target is_device_ptr(xd_dev, yd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
    for (k = 0; k < N; k++)
      yd_dev[k] += a*xd_dev[k];
  }
}
/* ----------------------------------------------------------------------------
 * Device-side in-place scaling: x *= a
 */
static void VScaleBy_OpenMPDEV(realtype a, N_Vector x)
{
  sunindextype k, N;
  realtype *xd_dev;
  int dev;

  /* target device and device-resident data array */
  dev    = omp_get_default_device();
  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);

#pragma omp target is_device_ptr(xd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (k = 0; k < N; k++)
    xd_dev[k] *= a;
}
/*
* -----------------------------------------------------------------
* private functions for special cases of vector array operations
* -----------------------------------------------------------------
*/
/* ----------------------------------------------------------------------------
 * Vector array sum: Z[i] = X[i] + Y[i] for i = 0,...,nvec-1.
 * All vectors are assumed to share the length of X[0].  Always returns 0.
 */
static int VSumVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  realtype* xd_dev=NULL;
  realtype* yd_dev=NULL;
  realtype* zd_dev=NULL;
  realtype** xd_dev_ptrs=NULL;
  realtype** yd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;

  N = NV_LENGTH_OMPDEV(X[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);

  /* single target region computes all nvec sums on the device */
#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
      yd_dev = yd_dev_ptrs[i];
      zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
      for (j=0; j<N; j++)
        zd_dev[j] = xd_dev[j] + yd_dev[j];
    }
  }

  free(xd_dev_ptrs);
  free(yd_dev_ptrs);
  free(zd_dev_ptrs);

  return(0);
}
/* ----------------------------------------------------------------------------
 * Vector array difference: Z[i] = X[i] - Y[i] for i = 0,...,nvec-1.
 * All vectors are assumed to share the length of X[0].  Always returns 0.
 */
static int VDiffVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  realtype* xd_dev=NULL;
  realtype* yd_dev=NULL;
  realtype* zd_dev=NULL;
  realtype** xd_dev_ptrs=NULL;
  realtype** yd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;

  N = NV_LENGTH_OMPDEV(X[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);

  /* single target region computes all nvec differences on the device */
#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev,yd_dev,zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
      yd_dev = yd_dev_ptrs[i];
      zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
      for (j=0; j<N; j++)
        zd_dev[j] = xd_dev[j] - yd_dev[j];
    }
  }

  free(xd_dev_ptrs);
  free(yd_dev_ptrs);
  free(zd_dev_ptrs);

  return(0);
}
/* ----------------------------------------------------------------------------
 * Scaled vector array sum: Z[i] = c * (X[i] + Y[i]) for i = 0,...,nvec-1.
 * All vectors are assumed to share the length of X[0].  Always returns 0.
 */
static int VScaleSumVectorArray_OpenMPDEV(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  realtype* xd_dev=NULL;
  realtype* yd_dev=NULL;
  realtype* zd_dev=NULL;
  realtype** xd_dev_ptrs=NULL;
  realtype** yd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;

  N = NV_LENGTH_OMPDEV(X[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);

  /* scalar c is transferred implicitly (firstprivate) into the region */
#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev,yd_dev,zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
      yd_dev = yd_dev_ptrs[i];
      zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
      for (j=0; j<N; j++)
        zd_dev[j] = c * (xd_dev[j] + yd_dev[j]);
    }
  }

  free(xd_dev_ptrs);
  free(yd_dev_ptrs);
  free(zd_dev_ptrs);

  return(0);
}
/* ----------------------------------------------------------------------------
 * Scaled vector array difference: Z[i] = c * (X[i] - Y[i]) for i = 0,...,nvec-1.
 * All vectors are assumed to share the length of X[0].  Always returns 0.
 */
static int VScaleDiffVectorArray_OpenMPDEV(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  realtype* xd_dev=NULL;
  realtype* yd_dev=NULL;
  realtype* zd_dev=NULL;
  realtype** xd_dev_ptrs=NULL;
  realtype** yd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;

  N = NV_LENGTH_OMPDEV(X[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);

  /* scalar c is transferred implicitly (firstprivate) into the region */
#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev,yd_dev,zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
      yd_dev = yd_dev_ptrs[i];
      zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
      for (j=0; j<N; j++)
        zd_dev[j] = c * (xd_dev[j] - yd_dev[j]);
    }
  }

  free(xd_dev_ptrs);
  free(yd_dev_ptrs);
  free(zd_dev_ptrs);

  return(0);
}
/* ----------------------------------------------------------------------------
 * Vector array linear operation: Z[i] = a*X[i] + Y[i] for i = 0,...,nvec-1.
 * All vectors are assumed to share the length of X[0].  Always returns 0.
 */
static int VLin1VectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  realtype* xd_dev=NULL;
  realtype* yd_dev=NULL;
  realtype* zd_dev=NULL;
  realtype** xd_dev_ptrs=NULL;
  realtype** yd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;

  N = NV_LENGTH_OMPDEV(X[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);

  /* scalar a is transferred implicitly (firstprivate) into the region */
#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev,yd_dev,zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
      yd_dev = yd_dev_ptrs[i];
      zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
      for (j=0; j<N; j++)
        zd_dev[j] = (a * xd_dev[j]) + yd_dev[j];
    }
  }

  free(xd_dev_ptrs);
  free(yd_dev_ptrs);
  free(zd_dev_ptrs);

  return(0);
}
/* ----------------------------------------------------------------------------
 * Vector array linear operation: Z[i] = a*X[i] - Y[i] for i = 0,...,nvec-1.
 * All vectors are assumed to share the length of X[0].  Always returns 0.
 */
static int VLin2VectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  realtype* xd_dev=NULL;
  realtype* yd_dev=NULL;
  realtype* zd_dev=NULL;
  realtype** xd_dev_ptrs=NULL;
  realtype** yd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;

  N = NV_LENGTH_OMPDEV(X[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);

  /* scalar a is transferred implicitly (firstprivate) into the region */
#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev,yd_dev,zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
      yd_dev = yd_dev_ptrs[i];
      zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
      for (j=0; j<N; j++)
        zd_dev[j] = (a * xd_dev[j]) - yd_dev[j];
    }
  }

  free(xd_dev_ptrs);
  free(yd_dev_ptrs);
  free(zd_dev_ptrs);

  return(0);
}
/* ----------------------------------------------------------------------------
 * Vector array axpy update: Y[i] += a*X[i] for i = 0,...,nvec-1, with
 * specialized kernels that avoid the multiply when a is exactly +1 or -1.
 * All vectors are assumed to share the length of X[0].  Always returns 0.
 */
static int VaxpyVectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y)
{
  int i, dev;
  sunindextype j, N;
  realtype* xd_dev=NULL;
  realtype* yd_dev=NULL;
  realtype** xd_dev_ptrs=NULL;
  realtype** yd_dev_ptrs=NULL;

  N = NV_LENGTH_OMPDEV(X[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);

  /* Y[i] += X[i] */
  if (a == ONE) {
#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev,yd_dev) device(dev)
#pragma omp teams distribute
    {
      for (i=0; i<nvec; i++) {
        xd_dev = xd_dev_ptrs[i];
        yd_dev = yd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
        for (j=0; j<N; j++)
          yd_dev[j] += xd_dev[j];
      }
    }

    free(xd_dev_ptrs);
    free(yd_dev_ptrs);
    return(0);
  }

  /* Y[i] -= X[i] */
  if (a == -ONE) {
#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev,yd_dev) device(dev)
#pragma omp teams distribute
    {
      for (i=0; i<nvec; i++) {
        xd_dev = xd_dev_ptrs[i];
        yd_dev = yd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
        for (j=0; j<N; j++)
          yd_dev[j] -= xd_dev[j];
      }
    }

    free(xd_dev_ptrs);
    free(yd_dev_ptrs);
    return(0);
  }

  /* general coefficient: Y[i] += a*X[i] */
#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev,yd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
      yd_dev = yd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
      for (j=0; j<N; j++)
        yd_dev[j] += a * xd_dev[j];
    }
  }

  free(xd_dev_ptrs);
  free(yd_dev_ptrs);

  return(0);
}
/*
* -----------------------------------------------------------------
* Enable / Disable fused and vector array operations
* -----------------------------------------------------------------
*/
/* ----------------------------------------------------------------------------
 * Enable (tf nonzero) or disable (tf zero) all fused and vector-array
 * operations on v at once.  Returns 0 on success, -1 if v or its ops
 * table is NULL.
 */
int N_VEnableFusedOps_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* require a vector with a valid ops table */
  if (v == NULL || v->ops == NULL) return(-1);

  /* fused vector operations */
  v->ops->nvlinearcombination = tf ? N_VLinearCombination_OpenMPDEV : NULL;
  v->ops->nvscaleaddmulti     = tf ? N_VScaleAddMulti_OpenMPDEV     : NULL;
  v->ops->nvdotprodmulti      = tf ? N_VDotProdMulti_OpenMPDEV      : NULL;

  /* vector array operations */
  v->ops->nvlinearsumvectorarray         = tf ? N_VLinearSumVectorArray_OpenMPDEV         : NULL;
  v->ops->nvscalevectorarray             = tf ? N_VScaleVectorArray_OpenMPDEV             : NULL;
  v->ops->nvconstvectorarray             = tf ? N_VConstVectorArray_OpenMPDEV             : NULL;
  v->ops->nvwrmsnormvectorarray          = tf ? N_VWrmsNormVectorArray_OpenMPDEV          : NULL;
  v->ops->nvwrmsnormmaskvectorarray      = tf ? N_VWrmsNormMaskVectorArray_OpenMPDEV      : NULL;
  v->ops->nvscaleaddmultivectorarray     = tf ? N_VScaleAddMultiVectorArray_OpenMPDEV     : NULL;
  v->ops->nvlinearcombinationvectorarray = tf ? N_VLinearCombinationVectorArray_OpenMPDEV : NULL;

  /* single buffer reduction operations */
  v->ops->nvdotprodmultilocal = tf ? N_VDotProdMultiLocal_OpenMPDEV : NULL;

  return(0);
}
/* ----------------------------------------------------------------------------
 * Enable/disable the fused linear combination operation on v.
 * Returns 0 on success, -1 if v or its ops table is NULL.
 */
int N_VEnableLinearCombination_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* require a vector with a valid ops table */
  if (v == NULL || v->ops == NULL) return(-1);

  v->ops->nvlinearcombination = tf ? N_VLinearCombination_OpenMPDEV : NULL;

  return(0);
}
/* ----------------------------------------------------------------------------
 * Enable/disable the fused scale-add-multi operation on v.
 * Returns 0 on success, -1 if v or its ops table is NULL.
 */
int N_VEnableScaleAddMulti_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* require a vector with a valid ops table */
  if (v == NULL || v->ops == NULL) return(-1);

  v->ops->nvscaleaddmulti = tf ? N_VScaleAddMulti_OpenMPDEV : NULL;

  return(0);
}
/* ----------------------------------------------------------------------------
 * Enable/disable the fused multi dot product operation on v.  The
 * node-local variant reuses the same kernel for this non-distributed
 * vector.  Returns 0 on success, -1 if v or its ops table is NULL.
 */
int N_VEnableDotProdMulti_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* require a vector with a valid ops table */
  if (v == NULL || v->ops == NULL) return(-1);

  v->ops->nvdotprodmulti      = tf ? N_VDotProdMulti_OpenMPDEV : NULL;
  v->ops->nvdotprodmultilocal = tf ? N_VDotProdMulti_OpenMPDEV : NULL;

  return(0);
}
/* ----------------------------------------------------------------------------
 * Enable/disable the linear sum vector array operation on v.
 * Returns 0 on success, -1 if v or its ops table is NULL.
 */
int N_VEnableLinearSumVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* require a vector with a valid ops table */
  if (v == NULL || v->ops == NULL) return(-1);

  v->ops->nvlinearsumvectorarray = tf ? N_VLinearSumVectorArray_OpenMPDEV : NULL;

  return(0);
}
/* ----------------------------------------------------------------------------
 * Enable/disable the scale vector array operation on v.
 * Returns 0 on success, -1 if v or its ops table is NULL.
 */
int N_VEnableScaleVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* require a vector with a valid ops table */
  if (v == NULL || v->ops == NULL) return(-1);

  v->ops->nvscalevectorarray = tf ? N_VScaleVectorArray_OpenMPDEV : NULL;

  return(0);
}
/* ----------------------------------------------------------------------------
 * Enable/disable the const vector array operation on v.
 * Returns 0 on success, -1 if v or its ops table is NULL.
 */
int N_VEnableConstVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* require a vector with a valid ops table */
  if (v == NULL || v->ops == NULL) return(-1);

  v->ops->nvconstvectorarray = tf ? N_VConstVectorArray_OpenMPDEV : NULL;

  return(0);
}
/* ----------------------------------------------------------------------------
 * Enable/disable the WRMS norm vector array operation on v.
 * Returns 0 on success, -1 if v or its ops table is NULL.
 */
int N_VEnableWrmsNormVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* require a vector with a valid ops table */
  if (v == NULL || v->ops == NULL) return(-1);

  v->ops->nvwrmsnormvectorarray = tf ? N_VWrmsNormVectorArray_OpenMPDEV : NULL;

  return(0);
}
/* ----------------------------------------------------------------------------
 * Enable/disable the masked WRMS norm vector array operation on v.
 * Returns 0 on success, -1 if v or its ops table is NULL.
 */
int N_VEnableWrmsNormMaskVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* require a vector with a valid ops table */
  if (v == NULL || v->ops == NULL) return(-1);

  v->ops->nvwrmsnormmaskvectorarray = tf ? N_VWrmsNormMaskVectorArray_OpenMPDEV : NULL;

  return(0);
}
/* ----------------------------------------------------------------------------
 * Enable/disable the scale-add-multi vector array operation on v.
 * Returns 0 on success, -1 if v or its ops table is NULL.
 */
int N_VEnableScaleAddMultiVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* require a vector with a valid ops table */
  if (v == NULL || v->ops == NULL) return(-1);

  v->ops->nvscaleaddmultivectorarray = tf ? N_VScaleAddMultiVectorArray_OpenMPDEV : NULL;

  return(0);
}
/* ----------------------------------------------------------------------------
 * Enable/disable the linear combination vector array operation on v.
 * Returns 0 on success, -1 if v or its ops table is NULL.
 */
int N_VEnableLinearCombinationVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* require a vector with a valid ops table */
  if (v == NULL || v->ops == NULL) return(-1);

  v->ops->nvlinearcombinationvectorarray = tf ? N_VLinearCombinationVectorArray_OpenMPDEV : NULL;

  return(0);
}
/**
* Copyright (C) Mellanox Technologies Ltd. 2001-2015. ALL RIGHTS RESERVED.
* Copyright (C) The University of Tennessee and The University
* of Tennessee Research Foundation. 2016. ALL RIGHTS RESERVED.
*
* See file LICENSE for terms.
*/
#ifndef LIBPERF_INT_H_
#define LIBPERF_INT_H_
#include <tools/perf/api/libperf.h>
BEGIN_C_DECLS
/** @file libperf_int.h */
#include <ucs/time/time.h>
#include <ucs/async/async.h>
#if _OPENMP
#include <omp.h>
#endif
#define TIMING_QUEUE_SIZE    2048   /* capacity of ucx_perf_context.timing_queue */
#define UCT_PERF_TEST_AM_ID  5      /* active message ID reserved for perf tests */
#define ADDR_BUF_SIZE        2048   /* presumably the max exchanged address blob size - confirm at use sites */
typedef struct ucx_perf_context ucx_perf_context_t;
typedef struct uct_peer uct_peer_t;
typedef struct ucp_perf_request ucp_perf_request_t;
typedef struct ucx_perf_thread_context ucx_perf_thread_context_t;
/* Allocation/copy hooks for the buffer memory type used by a perf test
 * (presumably one implementation per supported memory type - confirm). */
struct ucx_perf_allocator {
    ucs_memory_type_t mem_type;   /* memory type these hooks operate on */
    ucs_status_t (*init)(ucx_perf_context_t *perf);
    /* UCP-path buffer allocation/release (registers with a ucp_mem_h) */
    ucs_status_t (*ucp_alloc)(const ucx_perf_context_t *perf, size_t length,
                              void **address_p, ucp_mem_h *memh, int non_blk_flag);
    void         (*ucp_free)(const ucx_perf_context_t *perf, void *address,
                             ucp_mem_h memh);
    /* UCT-path buffer allocation/release */
    ucs_status_t (*uct_alloc)(const ucx_perf_context_t *perf, size_t length,
                              unsigned flags, uct_allocated_memory_t *alloc_mem);
    void         (*uct_free)(const ucx_perf_context_t *perf,
                             uct_allocated_memory_t *alloc_mem);
    /* memory-type-aware memcpy/memset for test buffer initialization */
    void         (*memcpy)(void *dst, ucs_memory_type_t dst_mem_type,
                           const void *src, ucs_memory_type_t src_mem_type,
                           size_t count);
    void*        (*memset)(void *dst, int value, size_t count);
};
/* Per-test (and, in threaded runs, per-thread) state of a perf benchmark. */
struct ucx_perf_context {
    ucx_perf_params_t            params;       /* user-supplied test parameters */

    /* Buffers */
    void                         *send_buffer;
    void                         *recv_buffer;

    /* Measurements */
    double                       start_time_acc;  /* accurate start time */
    ucs_time_t                   end_time;        /* inaccurate end time (upper bound) */
    ucs_time_t                   prev_time;       /* time of previous iteration */
    ucs_time_t                   report_interval; /* interval of showing report */
    ucx_perf_counter_t           max_iter;        /* iteration budget for the test */

    /* Measurements of current/previous **report** */
    struct {
        ucx_perf_counter_t       msgs;     /* number of messages */
        ucx_perf_counter_t       bytes;    /* number of bytes */
        ucx_perf_counter_t       iters;    /* number of iterations */
        ucs_time_t               time;     /* inaccurate time (for median and report interval) */
        double                   time_acc; /* accurate time (for avg latency/bw/msgrate) */
    } current, prev;

    ucs_time_t                   timing_queue[TIMING_QUEUE_SIZE]; /* ring of recent time samples */
    unsigned                     timing_queue_head;               /* next write slot in the ring */
    const ucx_perf_allocator_t   *allocator;                     /* memory-type hooks for the buffers */

    /* API-specific state; exactly one member is active per test */
    union {
        struct {
            ucs_async_context_t    async;
            uct_component_h        cmpt;
            uct_md_h               md;
            uct_worker_h           worker;
            uct_iface_h            iface;
            uct_peer_t             *peers;
            uct_allocated_memory_t send_mem;
            uct_allocated_memory_t recv_mem;
            uct_iov_t              *iov;
        } uct;
        struct {
            ucp_context_h          context;
            ucx_perf_thread_context_t* tctx;
            ucp_worker_h           worker;
            ucp_ep_h               ep;
            ucp_rkey_h             rkey;
            unsigned long          remote_addr;
            ucp_mem_h              send_memh;
            ucp_mem_h              recv_memh;
            ucp_dt_iov_t           *send_iov;
            ucp_dt_iov_t           *recv_iov;
        } ucp;
    };
};
/* Per-worker state when the perf test runs multi-threaded: each worker
 * owns a full perf context and reports its own status and result. */
struct ucx_perf_thread_context {
    pthread_t          pt;      /* worker thread handle */
    int                tid;     /* index of this worker within the test */
    ucs_status_t       status;  /* completion status of this worker */
    ucx_perf_context_t perf;
    ucx_perf_result_t  result;
};

/* Connection state to one remote UCT peer. */
struct uct_peer {
    uct_ep_h           ep;          /* endpoint to the remote peer */
    unsigned long      remote_addr; /* remote buffer address */
    uct_rkey_bundle_t  rkey;        /* unpacked remote key */
};

/* Request wrapper for non-blocking UCP operations. */
struct ucp_perf_request {
    void               *context;
};
#define UCX_PERF_TEST_FOREACH(perf) \
while (!ucx_perf_context_done(perf))
#define rte_call(_perf, _func, ...) \
((_perf)->params.rte->_func((_perf)->params.rte_group, ## __VA_ARGS__))
void ucx_perf_test_start_clock(ucx_perf_context_t *perf);
void uct_perf_iface_flush_b(ucx_perf_context_t *perf);
ucs_status_t uct_perf_test_dispatch(ucx_perf_context_t *perf);
ucs_status_t ucp_perf_test_dispatch(ucx_perf_context_t *perf);
void ucx_perf_calc_result(ucx_perf_context_t *perf, ucx_perf_result_t *result);
void uct_perf_barrier(ucx_perf_context_t *perf);
void ucp_perf_barrier(ucx_perf_context_t *perf);
/* Return nonzero once the run has consumed its iteration budget or
 * passed its (upper-bound) end time.  Marked unlikely: the common case
 * on the polling path is "keep going". */
static UCS_F_ALWAYS_INLINE int ucx_perf_context_done(ucx_perf_context_t *perf)
{
    int iters_exhausted = perf->current.iters >= perf->max_iter;
    int time_exhausted  = perf->current.time  >  perf->end_time;

    return ucs_unlikely(iters_exhausted || time_exhausted);
}
/* Sample an accurate wall-clock timestamp into the current report's
 * time_acc field (used for the averaged latency/bw/msgrate figures). */
static inline void ucx_perf_get_time(ucx_perf_context_t *perf)
{
    perf->current.time_acc = ucs_get_accurate_time();
}
/* Synchronize all OpenMP threads of the perf test; compiles to a no-op
 * when OpenMP is disabled.
 *
 * Fixed: declared with (void) instead of empty parentheses -- in C an
 * empty parameter list declares a function taking unspecified
 * arguments, which defeats argument checking (this header is consumed
 * as C via BEGIN_C_DECLS). */
static inline void ucx_perf_omp_barrier(void)
{
#if _OPENMP
#pragma omp barrier
#endif
}
/* Account for 'iters' completed iterations carrying 'bytes' bytes:
 * refresh the running counters, record one latency sample in the
 * circular timing queue, and emit a report through the RTE callback
 * once report_interval has elapsed since the previous report. */
static inline void ucx_perf_update(ucx_perf_context_t *perf,
                                   ucx_perf_counter_t iters, size_t bytes)
{
    ucx_perf_result_t result;

    perf->current.time   = ucs_get_time();
    perf->current.iters += iters;
    perf->current.bytes += bytes;
    perf->current.msgs  += 1;   /* one message per update call */

    /* Time since the previous update becomes one latency sample; the
     * queue is a fixed-size ring whose head wraps at TIMING_QUEUE_SIZE. */
    perf->timing_queue[perf->timing_queue_head] =
                    perf->current.time - perf->prev_time;
    ++perf->timing_queue_head;
    if (perf->timing_queue_head == TIMING_QUEUE_SIZE) {
        perf->timing_queue_head = 0;
    }

    perf->prev_time = perf->current.time;

    /* Periodic report: take an accurate timestamp, compute the result,
     * hand it to the RTE, and start a new report window by snapshotting
     * the current counters into 'prev'. */
    if (perf->current.time - perf->prev.time >= perf->report_interval) {
        ucx_perf_get_time(perf);
        ucx_perf_calc_result(perf, &result);
        rte_call(perf, report, &result, perf->params.report_arg, 0, 0);
        perf->prev = perf->current;
    }
}
/**
* Get the total length of the message size given by parameters
*/
/* Total length of one message: the sum of all entries of the test's
 * msg_size_list (the list must be present). */
static inline
size_t ucx_perf_get_message_size(const ucx_perf_params_t *params)
{
    size_t total = 0;
    size_t idx;

    ucs_assert(params->msg_size_list != NULL);

    for (idx = 0; idx < params->msg_size_cnt; ++idx) {
        total += params->msg_size_list[idx];
    }

    return total;
}
END_C_DECLS
#endif
|
mandel-omp-for-point.c | /*
* Sequential Mandelbrot program
*
* This program computes and displays all or part of the Mandelbrot
* set. By default, it examines all points in the complex plane
* that have both real and imaginary parts between -2 and 2.
* Command-line parameters allow zooming in on a specific part of
* this range.
*
* Usage:
* mandel [-i maxiter -c x0 y0 -s size -w windowsize]
* where
* maxiter denotes the maximum number of iterations at each point -- by default 1000
* x0, y0, and size specify the range to examine (a square
* centered at (x0 + iy0) of size 2*size by 2*size -- by default,
* a square of size 4 by 4 centered at the origin)
 *        windowsize denotes the size of the image (display window) to compute
*
* Input: none, except the optional command-line arguments
* Output: a graphical display as described in Wilkinson & Allen,
* displayed using the X Window system, plus text output to
* standard output showing the above parameters, plus execution
* time in seconds.
*
* Code based on the original code from Web site for Wilkinson and Allen's
* text on parallel programming:
* http://www.cs.uncc.edu/~abw/parallel/par_prog/
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <malloc.h>
#if _DISPLAY_
#include <X11/Xlib.h>
#include <X11/Xutil.h>
#include <X11/Xos.h>
#endif
#include <sys/time.h>
/* Wall-clock time in microseconds since the Unix epoch, as a double. */
double getusec_() {
        struct timeval now;
        gettimeofday(&now, NULL);
        double sec  = (double)now.tv_sec;
        double usec = (double)now.tv_usec;
        return sec * 1e6 + usec;
}
#define START_COUNT_TIME stamp = getusec_();
#define STOP_COUNT_TIME(_m) stamp = getusec_() - stamp;\
stamp = stamp/1e6;\
printf ("%s: %0.6fs\n",(_m), stamp);
/* Default values for things. */
#define N 2 /* size of problem space (x, y from -N to N) */
#define NPIXELS 800 /* size of display window in pixels */
int row, col; // variables used to traverse the problem space
/* Structure definition for complex numbers */
typedef struct {
double real, imag;
} complex;
#if _DISPLAY_
/* Functions for GUI */
#include "mandelbrot-gui.h" /* has setup(), interact() */
#endif
/*
 * Compute the Mandelbrot escape-iteration count for every pixel of a
 * width x height image.
 *
 * Pixel (row, col) maps to the complex point
 *   c = (real_min + col*scale_real) + i*(imag_min + (height-1-row)*scale_imag)
 * (the row flip puts larger imaginary parts at the top of the image)
 * and is iterated z <- z^2 + c until |z|^2 >= N*N or maxiter
 * iterations are reached.
 *
 * With _DISPLAY_ the point is drawn to the X window (serialized by a
 * critical section, since Xlib calls are not thread-safe); otherwise
 * the iteration count k is stored into output[row][col].
 *
 * Fixed: the loop counters are declared locally instead of using the
 * file-scope 'row'/'col' globals.  The collapsed OpenMP loop nest then
 * unambiguously iterates over private counters, and the function stays
 * re-entrant even when compiled without OpenMP.
 */
void mandelbrot(int height,
                int width,
                double real_min,
                double imag_min,
                double scale_real,
                double scale_imag,
                int maxiter,
#if _DISPLAY_
                int setup_return,
                Display *display,
                Window win,
                GC gc,
                double scale_color,
                double min_color)
#else
                int ** output)
#endif
{
    /* Calculate points and save/display */
    #pragma omp parallel for collapse(2) schedule(runtime)
    for (int row = 0; row < height; ++row) {
        for (int col = 0; col < width; ++col) {
            complex z, c;
            z.real = z.imag = 0;

            /* Scale display coordinates to actual region */
            c.real = real_min + ((double) col * scale_real);
            c.imag = imag_min + ((double) (height-1-row) * scale_imag);

            /* Calculate z0, z1, .... until divergence or maximum iterations */
            int k = 0;
            double lengthsq, temp;
            do {
                temp = z.real*z.real - z.imag*z.imag + c.real;
                z.imag = 2*z.real*z.imag + c.imag;
                z.real = temp;
                lengthsq = z.real*z.real + z.imag*z.imag;
                ++k;
            } while (lengthsq < (N*N) && k < maxiter);

#if _DISPLAY_
            /* Scale color and display point */
            long color = (long) ((k-1) * scale_color) + min_color;
            if (setup_return == EXIT_SUCCESS) {
                #pragma omp critical
                {
                    XSetForeground (display, gc, color);
                    XDrawPoint (display, win, gc, col, row);
                }
            }
#else
            output[row][col]=k;
#endif
        }
    }
}
int main(int argc, char *argv[]) {
int maxiter = 1000;
double real_min;
double real_max;
double imag_min;
double imag_max;
int width = NPIXELS; /* dimensions of display window */
int height = NPIXELS;
double size=N, x0 = 0, y0 = 0;
#if _DISPLAY_
Display *display;
Window win;
GC gc;
int setup_return;
long min_color = 0, max_color = 0;
double scale_color;
#else
int ** output;
FILE *fp = NULL;
#endif
double scale_real, scale_imag;
/* Process command-line arguments */
for (int i=1; i<argc; i++) {
if (strcmp(argv[i], "-i")==0) {
maxiter = atoi(argv[++i]);
}
else if (strcmp(argv[i], "-w")==0) {
width = atoi(argv[++i]);
height = width;
}
else if (strcmp(argv[i], "-s")==0) {
size = atof(argv[++i]);
}
#if !_DISPLAY_
else if (strcmp(argv[i], "-o")==0) {
if((fp=fopen("mandel.out", "wb"))==NULL) {
fprintf(stderr, "Unable to open file\n");
return EXIT_FAILURE;
}
}
#endif
else if (strcmp(argv[i], "-c")==0) {
x0 = atof(argv[++i]);
y0 = atof(argv[++i]);
}
else {
#if _DISPLAY_
fprintf(stderr, "Usage: %s [-i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]);
#else
fprintf(stderr, "Usage: %s [-o -i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]);
fprintf(stderr, " -o to write computed image to disk (default no file generated)\n");
#endif
fprintf(stderr, " -i to specify maximum number of iterations at each point (default 1000)\n");
#if _DISPLAY_
fprintf(stderr, " -w to specify the size of the display window (default 800x800 pixels)\n");
#else
fprintf(stderr, " -w to specify the size of the image to compute (default 800x800 elements)\n");
#endif
fprintf(stderr, " -c to specify the center x0+iy0 of the square to compute (default origin)\n");
fprintf(stderr, " -s to specify the size of the square to compute (default 2, i.e. size 4 by 4)\n");
return EXIT_FAILURE;
}
}
real_min = x0 - size;
real_max = x0 + size;
imag_min = y0 - size;
imag_max = y0 + size;
/* Produce text output */
fprintf(stdout, "\n");
fprintf(stdout, "Mandelbrot program\n");
fprintf(stdout, "center = (%g, %g), size = %g\n",
(real_max + real_min)/2, (imag_max + imag_min)/2,
(real_max - real_min)/2);
fprintf(stdout, "maximum iterations = %d\n", maxiter);
fprintf(stdout, "\n");
#if _DISPLAY_
/* Initialize for graphical display */
setup_return =
setup(width, height, &display, &win, &gc, &min_color, &max_color);
if (setup_return != EXIT_SUCCESS) {
fprintf(stderr, "Unable to initialize display, continuing\n");
return EXIT_FAILURE;
}
#else
output = malloc(height*sizeof(int *));
for (int row = 0; row < height; ++row)
output[row] = malloc(width*sizeof(int));
#endif
/* Compute factors to scale computational region to window */
scale_real = (double) (real_max - real_min) / (double) width;
scale_imag = (double) (imag_max - imag_min) / (double) height;
#if _DISPLAY_
/* Compute factor for color scaling */
scale_color = (double) (max_color - min_color) / (double) (maxiter - 1);
#endif
/* Start timing */
double stamp;
START_COUNT_TIME;
#if _DISPLAY_
mandelbrot(height,width,real_min, imag_min, scale_real, scale_imag, maxiter,
setup_return, display, win, gc, scale_color, min_color);
#else
mandelbrot(height,width,real_min, imag_min, scale_real, scale_imag, maxiter,
output);
#endif
/* End timing */
STOP_COUNT_TIME("Total execution time");
/* Be sure all output is written */
#if _DISPLAY_
if (setup_return == EXIT_SUCCESS) {
XFlush (display);
}
#else
if (fp != NULL)
{
for (int row = 0; row < height; ++row)
if(fwrite(output[row], sizeof(int), width, fp) != width) {
fprintf(stderr, "Output file not written correctly\n");
}
}
#endif
#if _DISPLAY_
/* Wait for user response, then exit program */
if (setup_return == EXIT_SUCCESS) {
interact(display, &win, width, height,
real_min, real_max, imag_min, imag_max);
}
return EXIT_SUCCESS;
#endif
}
|
testMatrixMultiply_OpenMp.c | #include "mex.h"
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define A_IN prhs[0] /* (M * P) */
#define B_IN prhs[1] /* (P * N) */
#define C_OUT plhs[0] /* (M * N) */
/* MATLAB MEX entry point computing C = A * B with a naive triple loop,
 * parallelized over the rows of A with OpenMP (dynamic schedule).
 * A is (M x P), B is (P x N), so C is (M x N); all matrices are stored
 * column-major, as MATLAB lays them out. */
void mexFunction(int nlhs, mxArray* plhs[], int nrhs, const mxArray* prhs[])
{
    double *pA, *pB, *pC;  /* raw column-major data of A, B and C */
    int i, j, k;           /* row, column and inner-dimension indices.
                              NOTE(review): plain int assumes each
                              dimension fits in int -- confirm for very
                              large matrices since mwSize may be wider. */
    mwSize M, P, N;

    pA = mxGetPr(A_IN);
    pB = mxGetPr(B_IN);
    M = mxGetM(A_IN);
    P = mxGetN(A_IN);
    mxAssert(P == mxGetM(B_IN), "Columns of A must be equal to rows of B!");
    N = mxGetN(B_IN);
    /* mxCreateDoubleMatrix zero-fills C, so the += accumulation below
     * starts from 0 */
    C_OUT = mxCreateDoubleMatrix(M, N, mxREAL);
    pC = mxGetPr(C_OUT);

    /* i, j, k are private so every thread iterates independently; the
     * worksharing loop hands out rows i dynamically */
#pragma omp parallel private(i, j, k)
    {
#pragma omp for schedule(dynamic)
        for (i = 0; i < M; i++)
            for (j = 0; j < N; j++)
                for (k = 0; k < P; k++)
                    pC[j * M + i] += pA[k * M + i] * pB[j * P + k];
    }
}
maxwell_zeroBC.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision: 2.10 $
***********************************************************************EHEADER*/
#include "_hypre_sstruct_ls.h"
/* Zero the listed rows of a hypre_ParVector by delegating to the
 * sequential kernel on the local part of the vector.  Returns 0. */
HYPRE_Int
hypre_ParVectorZeroBCValues(hypre_ParVector *v,
                            HYPRE_Int       *rows,
                            HYPRE_Int        nrows)
{
   hypre_Vector *local_part = hypre_ParVectorLocalVector(v);

   hypre_SeqVectorZeroBCValues(local_part, rows, nrows);

   return 0;
}
/* Set the entries of a sequential vector named by rows[0..nrows-1] to
 * zero; the loop runs OpenMP-parallel when HYPRE is built with OpenMP.
 * Returns 0. */
HYPRE_Int
hypre_SeqVectorZeroBCValues(hypre_Vector *v,
                            HYPRE_Int    *rows,
                            HYPRE_Int     nrows)
{
   double    *data = hypre_VectorData(v);
   HYPRE_Int  i;

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < nrows; i++)
   {
      data[rows[i]] = 0.0;
   }

   return 0;
}
|
task_tied_thread_threadid.c | // RUN: %libomp-compile-and-run
// REQUIRES: abt
#include "omp_testsuite.h"
#include <string.h>
#include <stdio.h>
// Verify that a tied task keeps both its OpenMP thread id and its
// underlying Argobots thread across context-switch points, at two
// nesting levels.  Each task i accumulates a bitmask in vals[i]:
//   +1 : every nested-parallel iteration kept its ids across yields
//   +2 : the task's OpenMP thread id was unchanged across the region
//   +4 : the task's Argobots thread was unchanged across the region
// so vals[i] == 7 means full success.  Returns 1 on success, 0 on
// failure.
//
// Fixed: the child-result check used local_vals[i] (the outer task
// index) instead of local_vals[j], so only a single element was ever
// inspected and failures in the other iterations went unnoticed.
int test_task_tied_thread_threadid(int num_threads) {
  int vals[num_threads];
  memset(vals, 0, sizeof(int) * num_threads);
  omp_set_max_active_levels(2);
#pragma omp parallel num_threads(num_threads / 2 + 1)
#pragma omp master
  {
    int i;
    for (i = 0; i < num_threads; i++) {
#pragma omp task firstprivate(i)
      {
        int omp_thread_id = omp_get_thread_num();
        ABT_thread abt_thread;
        ABT_EXIT_IF_FAIL(ABT_thread_self(&abt_thread));
        int local_vals[num_threads];
        memset(local_vals, 0, sizeof(int) * num_threads);
        int j;
#pragma omp parallel for num_threads(num_threads)
        for (j = 0; j < num_threads; j++) {
          int l2_omp_thread_id = omp_get_thread_num();
          ABT_thread l2_abt_thread;
          ABT_EXIT_IF_FAIL(ABT_thread_self(&l2_abt_thread));
          // Context switching in OpenMP.
#pragma omp taskyield
          int l2_omp_thread_id2 = omp_get_thread_num();
          if (l2_omp_thread_id == l2_omp_thread_id2) {
            local_vals[j] += 1;
          }
          ABT_thread l2_abt_thread2;
          ABT_EXIT_IF_FAIL(ABT_thread_self(&l2_abt_thread2));
          ABT_bool l2_abt_thread_equal;
          ABT_EXIT_IF_FAIL(ABT_thread_equal(l2_abt_thread, l2_abt_thread2,
                                            &l2_abt_thread_equal));
          if (l2_abt_thread_equal == ABT_TRUE) {
            local_vals[j] += 2;
          }
          // Context switching in Argobots.
          ABT_EXIT_IF_FAIL(ABT_thread_yield());
          int l2_omp_thread_id3 = omp_get_thread_num();
          if (l2_omp_thread_id2 == l2_omp_thread_id3) {
            local_vals[j] += 4;
          }
        }
        // Check child threads (every iteration must have scored 7).
        int child_fail = 0;
        for (j = 0; j < num_threads; j++) {
          if (local_vals[j] != 7) {
            child_fail = 1;
          }
        }
        if (!child_fail) {
          vals[i] += 1;
        }
        int omp_thread_id2 = omp_get_thread_num();
        if (omp_thread_id == omp_thread_id2) {
          vals[i] += 2;
        }
        ABT_thread abt_thread2;
        ABT_EXIT_IF_FAIL(ABT_thread_self(&abt_thread2));
        ABT_bool abt_thread_equal;
        ABT_EXIT_IF_FAIL(ABT_thread_equal(abt_thread, abt_thread2,
                                          &abt_thread_equal));
        if (abt_thread_equal == ABT_TRUE) {
          vals[i] += 4;
        }
      }
    }
  }
  int index;
  for (index = 0; index < num_threads; index++) {
    if (vals[index] != 7) {
      printf("vals[%d] == %d\n", index, vals[index]);
      return 0;
    }
  }
  return 1;
}
/* Run the test for thread counts 1..REPETITIONS and return how many
 * configurations failed (0 == all passed). */
int main() {
  int failures = 0;
  int n;
  for (n = 1; n <= REPETITIONS; n++) {
    if (!test_task_tied_thread_threadid(n)) {
      failures++;
    }
  }
  return failures;
}
|
dds.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD DDDD SSSSS %
% D D D D SS %
% D D D D SSS %
% D D D D SS %
% DDDD DDDD SSSSS %
% %
% %
% Read/Write Microsoft Direct Draw Surface Image Format %
% %
% Software Design %
% Bianca van Schaik %
% March 2008 %
% Dirk Lemstra %
% September 2013 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/attribute.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel-accessor.h"
#include "magick/profile.h"
#include "magick/quantum.h"
#include "magick/quantum-private.h"
#include "magick/resource_.h"
#include "magick/static.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/module.h"
#include "magick/transform.h"
/*
Definitions
*/
#define DDSD_CAPS 0x00000001
#define DDSD_HEIGHT 0x00000002
#define DDSD_WIDTH 0x00000004
#define DDSD_PITCH 0x00000008
#define DDSD_PIXELFORMAT 0x00001000
#define DDSD_MIPMAPCOUNT 0x00020000
#define DDSD_LINEARSIZE 0x00080000
#define DDSD_DEPTH 0x00800000
#define DDPF_ALPHAPIXELS 0x00000001
#define DDPF_FOURCC 0x00000004
#define DDPF_RGB 0x00000040
#define DDPF_LUMINANCE 0x00020000
#define FOURCC_DXT1 0x31545844
#define FOURCC_DXT3 0x33545844
#define FOURCC_DXT5 0x35545844
#define DDSCAPS_COMPLEX 0x00000008
#define DDSCAPS_TEXTURE 0x00001000
#define DDSCAPS_MIPMAP 0x00400000
#define DDSCAPS2_CUBEMAP 0x00000200
#define DDSCAPS2_CUBEMAP_POSITIVEX 0x00000400
#define DDSCAPS2_CUBEMAP_NEGATIVEX 0x00000800
#define DDSCAPS2_CUBEMAP_POSITIVEY 0x00001000
#define DDSCAPS2_CUBEMAP_NEGATIVEY 0x00002000
#define DDSCAPS2_CUBEMAP_POSITIVEZ 0x00004000
#define DDSCAPS2_CUBEMAP_NEGATIVEZ 0x00008000
#define DDSCAPS2_VOLUME 0x00200000
#ifndef SIZE_MAX
#define SIZE_MAX ((size_t) -1)
#endif
/*
Structure declarations.
*/
/* DDS pixel-format descriptor as read from the file header: DDPF_*
 * flags, FourCC code and the RGBA channel bit masks. */
typedef struct _DDSPixelFormat
{
  size_t
    flags,
    fourcc,
    rgb_bitcount,
    r_bitmask,
    g_bitmask,
    b_bitmask,
    alpha_bitmask;
} DDSPixelFormat;

/* Parsed DDS main header: DDSD_* flags, geometry, mipmap count and
 * DDSCAPS* capability bits, plus the embedded pixel format. */
typedef struct _DDSInfo
{
  size_t
    flags,
    height,
    width,
    pitchOrLinearSize,
    depth,
    mipmapcount,
    ddscaps1,
    ddscaps2;

  DDSPixelFormat
    pixelformat;
} DDSInfo;

/* The four RGBA palette entries of one DXT block. */
typedef struct _DDSColors
{
  unsigned char
    r[4],
    g[4],
    b[4],
    a[4];
} DDSColors;

/* 4-component float vector. */
typedef struct _DDSVector4
{
  float
    x,
    y,
    z,
    w;
} DDSVector4;

/* 3-component float vector. */
typedef struct _DDSVector3
{
  float
    x,
    y,
    z;
} DDSVector3;

/* One candidate endpoint pair for single-colour fitting: start/end
 * channel values and the resulting reproduction error. */
typedef struct _DDSSourceBlock
{
  unsigned char
    start,
    end,
    error;
} DDSSourceBlock;

/* The two best source blocks for one 8-bit input value (see the
 * DDSLookup_* tables below). */
typedef struct _DDSSingleColourLookup
{
  DDSSourceBlock sources[2];
} DDSSingleColourLookup;

/* Common signature of the decoder routines defined later in the file. */
typedef MagickBooleanType
  DDSDecoder(Image *, DDSInfo *, ExceptionInfo *);
/*
 * DDSLookup_5_4[v]: for each 8-bit source value v (256 entries), two
 * candidate { start, end, error } endpoint encodings used by the
 * single-colour compressor.  The "5_4" name suggests 5-bit endpoints
 * with 4-interpolant blocks -- confirm against the encoder that indexes
 * this table (outside this chunk).
 */
static const DDSSingleColourLookup DDSLookup_5_4[] =
{
  { { { 0, 0, 0 }, { 0, 0, 0 } } },
  { { { 0, 0, 1 }, { 0, 1, 1 } } },
  { { { 0, 0, 2 }, { 0, 1, 0 } } },
  { { { 0, 0, 3 }, { 0, 1, 1 } } },
  { { { 0, 0, 4 }, { 0, 2, 1 } } },
  { { { 1, 0, 3 }, { 0, 2, 0 } } },
  { { { 1, 0, 2 }, { 0, 2, 1 } } },
  { { { 1, 0, 1 }, { 0, 3, 1 } } },
  { { { 1, 0, 0 }, { 0, 3, 0 } } },
  { { { 1, 0, 1 }, { 1, 2, 1 } } },
  { { { 1, 0, 2 }, { 1, 2, 0 } } },
  { { { 1, 0, 3 }, { 0, 4, 0 } } },
  { { { 1, 0, 4 }, { 0, 5, 1 } } },
  { { { 2, 0, 3 }, { 0, 5, 0 } } },
  { { { 2, 0, 2 }, { 0, 5, 1 } } },
  { { { 2, 0, 1 }, { 0, 6, 1 } } },
  { { { 2, 0, 0 }, { 0, 6, 0 } } },
  { { { 2, 0, 1 }, { 2, 3, 1 } } },
  { { { 2, 0, 2 }, { 2, 3, 0 } } },
  { { { 2, 0, 3 }, { 0, 7, 0 } } },
  { { { 2, 0, 4 }, { 1, 6, 1 } } },
  { { { 3, 0, 3 }, { 1, 6, 0 } } },
  { { { 3, 0, 2 }, { 0, 8, 0 } } },
  { { { 3, 0, 1 }, { 0, 9, 1 } } },
  { { { 3, 0, 0 }, { 0, 9, 0 } } },
  { { { 3, 0, 1 }, { 0, 9, 1 } } },
  { { { 3, 0, 2 }, { 0, 10, 1 } } },
  { { { 3, 0, 3 }, { 0, 10, 0 } } },
  { { { 3, 0, 4 }, { 2, 7, 1 } } },
  { { { 4, 0, 4 }, { 2, 7, 0 } } },
  { { { 4, 0, 3 }, { 0, 11, 0 } } },
  { { { 4, 0, 2 }, { 1, 10, 1 } } },
  { { { 4, 0, 1 }, { 1, 10, 0 } } },
  { { { 4, 0, 0 }, { 0, 12, 0 } } },
  { { { 4, 0, 1 }, { 0, 13, 1 } } },
  { { { 4, 0, 2 }, { 0, 13, 0 } } },
  { { { 4, 0, 3 }, { 0, 13, 1 } } },
  { { { 4, 0, 4 }, { 0, 14, 1 } } },
  { { { 5, 0, 3 }, { 0, 14, 0 } } },
  { { { 5, 0, 2 }, { 2, 11, 1 } } },
  { { { 5, 0, 1 }, { 2, 11, 0 } } },
  { { { 5, 0, 0 }, { 0, 15, 0 } } },
  { { { 5, 0, 1 }, { 1, 14, 1 } } },
  { { { 5, 0, 2 }, { 1, 14, 0 } } },
  { { { 5, 0, 3 }, { 0, 16, 0 } } },
  { { { 5, 0, 4 }, { 0, 17, 1 } } },
  { { { 6, 0, 3 }, { 0, 17, 0 } } },
  { { { 6, 0, 2 }, { 0, 17, 1 } } },
  { { { 6, 0, 1 }, { 0, 18, 1 } } },
  { { { 6, 0, 0 }, { 0, 18, 0 } } },
  { { { 6, 0, 1 }, { 2, 15, 1 } } },
  { { { 6, 0, 2 }, { 2, 15, 0 } } },
  { { { 6, 0, 3 }, { 0, 19, 0 } } },
  { { { 6, 0, 4 }, { 1, 18, 1 } } },
  { { { 7, 0, 3 }, { 1, 18, 0 } } },
  { { { 7, 0, 2 }, { 0, 20, 0 } } },
  { { { 7, 0, 1 }, { 0, 21, 1 } } },
  { { { 7, 0, 0 }, { 0, 21, 0 } } },
  { { { 7, 0, 1 }, { 0, 21, 1 } } },
  { { { 7, 0, 2 }, { 0, 22, 1 } } },
  { { { 7, 0, 3 }, { 0, 22, 0 } } },
  { { { 7, 0, 4 }, { 2, 19, 1 } } },
  { { { 8, 0, 4 }, { 2, 19, 0 } } },
  { { { 8, 0, 3 }, { 0, 23, 0 } } },
  { { { 8, 0, 2 }, { 1, 22, 1 } } },
  { { { 8, 0, 1 }, { 1, 22, 0 } } },
  { { { 8, 0, 0 }, { 0, 24, 0 } } },
  { { { 8, 0, 1 }, { 0, 25, 1 } } },
  { { { 8, 0, 2 }, { 0, 25, 0 } } },
  { { { 8, 0, 3 }, { 0, 25, 1 } } },
  { { { 8, 0, 4 }, { 0, 26, 1 } } },
  { { { 9, 0, 3 }, { 0, 26, 0 } } },
  { { { 9, 0, 2 }, { 2, 23, 1 } } },
  { { { 9, 0, 1 }, { 2, 23, 0 } } },
  { { { 9, 0, 0 }, { 0, 27, 0 } } },
  { { { 9, 0, 1 }, { 1, 26, 1 } } },
  { { { 9, 0, 2 }, { 1, 26, 0 } } },
  { { { 9, 0, 3 }, { 0, 28, 0 } } },
  { { { 9, 0, 4 }, { 0, 29, 1 } } },
  { { { 10, 0, 3 }, { 0, 29, 0 } } },
  { { { 10, 0, 2 }, { 0, 29, 1 } } },
  { { { 10, 0, 1 }, { 0, 30, 1 } } },
  { { { 10, 0, 0 }, { 0, 30, 0 } } },
  { { { 10, 0, 1 }, { 2, 27, 1 } } },
  { { { 10, 0, 2 }, { 2, 27, 0 } } },
  { { { 10, 0, 3 }, { 0, 31, 0 } } },
  { { { 10, 0, 4 }, { 1, 30, 1 } } },
  { { { 11, 0, 3 }, { 1, 30, 0 } } },
  { { { 11, 0, 2 }, { 4, 24, 0 } } },
  { { { 11, 0, 1 }, { 1, 31, 1 } } },
  { { { 11, 0, 0 }, { 1, 31, 0 } } },
  { { { 11, 0, 1 }, { 1, 31, 1 } } },
  { { { 11, 0, 2 }, { 2, 30, 1 } } },
  { { { 11, 0, 3 }, { 2, 30, 0 } } },
  { { { 11, 0, 4 }, { 2, 31, 1 } } },
  { { { 12, 0, 4 }, { 2, 31, 0 } } },
  { { { 12, 0, 3 }, { 4, 27, 0 } } },
  { { { 12, 0, 2 }, { 3, 30, 1 } } },
  { { { 12, 0, 1 }, { 3, 30, 0 } } },
  { { { 12, 0, 0 }, { 4, 28, 0 } } },
  { { { 12, 0, 1 }, { 3, 31, 1 } } },
  { { { 12, 0, 2 }, { 3, 31, 0 } } },
  { { { 12, 0, 3 }, { 3, 31, 1 } } },
  { { { 12, 0, 4 }, { 4, 30, 1 } } },
  { { { 13, 0, 3 }, { 4, 30, 0 } } },
  { { { 13, 0, 2 }, { 6, 27, 1 } } },
  { { { 13, 0, 1 }, { 6, 27, 0 } } },
  { { { 13, 0, 0 }, { 4, 31, 0 } } },
  { { { 13, 0, 1 }, { 5, 30, 1 } } },
  { { { 13, 0, 2 }, { 5, 30, 0 } } },
  { { { 13, 0, 3 }, { 8, 24, 0 } } },
  { { { 13, 0, 4 }, { 5, 31, 1 } } },
  { { { 14, 0, 3 }, { 5, 31, 0 } } },
  { { { 14, 0, 2 }, { 5, 31, 1 } } },
  { { { 14, 0, 1 }, { 6, 30, 1 } } },
  { { { 14, 0, 0 }, { 6, 30, 0 } } },
  { { { 14, 0, 1 }, { 6, 31, 1 } } },
  { { { 14, 0, 2 }, { 6, 31, 0 } } },
  { { { 14, 0, 3 }, { 8, 27, 0 } } },
  { { { 14, 0, 4 }, { 7, 30, 1 } } },
  { { { 15, 0, 3 }, { 7, 30, 0 } } },
  { { { 15, 0, 2 }, { 8, 28, 0 } } },
  { { { 15, 0, 1 }, { 7, 31, 1 } } },
  { { { 15, 0, 0 }, { 7, 31, 0 } } },
  { { { 15, 0, 1 }, { 7, 31, 1 } } },
  { { { 15, 0, 2 }, { 8, 30, 1 } } },
  { { { 15, 0, 3 }, { 8, 30, 0 } } },
  { { { 15, 0, 4 }, { 10, 27, 1 } } },
  { { { 16, 0, 4 }, { 10, 27, 0 } } },
  { { { 16, 0, 3 }, { 8, 31, 0 } } },
  { { { 16, 0, 2 }, { 9, 30, 1 } } },
  { { { 16, 0, 1 }, { 9, 30, 0 } } },
  { { { 16, 0, 0 }, { 12, 24, 0 } } },
  { { { 16, 0, 1 }, { 9, 31, 1 } } },
  { { { 16, 0, 2 }, { 9, 31, 0 } } },
  { { { 16, 0, 3 }, { 9, 31, 1 } } },
  { { { 16, 0, 4 }, { 10, 30, 1 } } },
  { { { 17, 0, 3 }, { 10, 30, 0 } } },
  { { { 17, 0, 2 }, { 10, 31, 1 } } },
  { { { 17, 0, 1 }, { 10, 31, 0 } } },
  { { { 17, 0, 0 }, { 12, 27, 0 } } },
  { { { 17, 0, 1 }, { 11, 30, 1 } } },
  { { { 17, 0, 2 }, { 11, 30, 0 } } },
  { { { 17, 0, 3 }, { 12, 28, 0 } } },
  { { { 17, 0, 4 }, { 11, 31, 1 } } },
  { { { 18, 0, 3 }, { 11, 31, 0 } } },
  { { { 18, 0, 2 }, { 11, 31, 1 } } },
  { { { 18, 0, 1 }, { 12, 30, 1 } } },
  { { { 18, 0, 0 }, { 12, 30, 0 } } },
  { { { 18, 0, 1 }, { 14, 27, 1 } } },
  { { { 18, 0, 2 }, { 14, 27, 0 } } },
  { { { 18, 0, 3 }, { 12, 31, 0 } } },
  { { { 18, 0, 4 }, { 13, 30, 1 } } },
  { { { 19, 0, 3 }, { 13, 30, 0 } } },
  { { { 19, 0, 2 }, { 16, 24, 0 } } },
  { { { 19, 0, 1 }, { 13, 31, 1 } } },
  { { { 19, 0, 0 }, { 13, 31, 0 } } },
  { { { 19, 0, 1 }, { 13, 31, 1 } } },
  { { { 19, 0, 2 }, { 14, 30, 1 } } },
  { { { 19, 0, 3 }, { 14, 30, 0 } } },
  { { { 19, 0, 4 }, { 14, 31, 1 } } },
  { { { 20, 0, 4 }, { 14, 31, 0 } } },
  { { { 20, 0, 3 }, { 16, 27, 0 } } },
  { { { 20, 0, 2 }, { 15, 30, 1 } } },
  { { { 20, 0, 1 }, { 15, 30, 0 } } },
  { { { 20, 0, 0 }, { 16, 28, 0 } } },
  { { { 20, 0, 1 }, { 15, 31, 1 } } },
  { { { 20, 0, 2 }, { 15, 31, 0 } } },
  { { { 20, 0, 3 }, { 15, 31, 1 } } },
  { { { 20, 0, 4 }, { 16, 30, 1 } } },
  { { { 21, 0, 3 }, { 16, 30, 0 } } },
  { { { 21, 0, 2 }, { 18, 27, 1 } } },
  { { { 21, 0, 1 }, { 18, 27, 0 } } },
  { { { 21, 0, 0 }, { 16, 31, 0 } } },
  { { { 21, 0, 1 }, { 17, 30, 1 } } },
  { { { 21, 0, 2 }, { 17, 30, 0 } } },
  { { { 21, 0, 3 }, { 20, 24, 0 } } },
  { { { 21, 0, 4 }, { 17, 31, 1 } } },
  { { { 22, 0, 3 }, { 17, 31, 0 } } },
  { { { 22, 0, 2 }, { 17, 31, 1 } } },
  { { { 22, 0, 1 }, { 18, 30, 1 } } },
  { { { 22, 0, 0 }, { 18, 30, 0 } } },
  { { { 22, 0, 1 }, { 18, 31, 1 } } },
  { { { 22, 0, 2 }, { 18, 31, 0 } } },
  { { { 22, 0, 3 }, { 20, 27, 0 } } },
  { { { 22, 0, 4 }, { 19, 30, 1 } } },
  { { { 23, 0, 3 }, { 19, 30, 0 } } },
  { { { 23, 0, 2 }, { 20, 28, 0 } } },
  { { { 23, 0, 1 }, { 19, 31, 1 } } },
  { { { 23, 0, 0 }, { 19, 31, 0 } } },
  { { { 23, 0, 1 }, { 19, 31, 1 } } },
  { { { 23, 0, 2 }, { 20, 30, 1 } } },
  { { { 23, 0, 3 }, { 20, 30, 0 } } },
  { { { 23, 0, 4 }, { 22, 27, 1 } } },
  { { { 24, 0, 4 }, { 22, 27, 0 } } },
  { { { 24, 0, 3 }, { 20, 31, 0 } } },
  { { { 24, 0, 2 }, { 21, 30, 1 } } },
  { { { 24, 0, 1 }, { 21, 30, 0 } } },
  { { { 24, 0, 0 }, { 24, 24, 0 } } },
  { { { 24, 0, 1 }, { 21, 31, 1 } } },
  { { { 24, 0, 2 }, { 21, 31, 0 } } },
  { { { 24, 0, 3 }, { 21, 31, 1 } } },
  { { { 24, 0, 4 }, { 22, 30, 1 } } },
  { { { 25, 0, 3 }, { 22, 30, 0 } } },
  { { { 25, 0, 2 }, { 22, 31, 1 } } },
  { { { 25, 0, 1 }, { 22, 31, 0 } } },
  { { { 25, 0, 0 }, { 24, 27, 0 } } },
  { { { 25, 0, 1 }, { 23, 30, 1 } } },
  { { { 25, 0, 2 }, { 23, 30, 0 } } },
  { { { 25, 0, 3 }, { 24, 28, 0 } } },
  { { { 25, 0, 4 }, { 23, 31, 1 } } },
  { { { 26, 0, 3 }, { 23, 31, 0 } } },
  { { { 26, 0, 2 }, { 23, 31, 1 } } },
  { { { 26, 0, 1 }, { 24, 30, 1 } } },
  { { { 26, 0, 0 }, { 24, 30, 0 } } },
  { { { 26, 0, 1 }, { 26, 27, 1 } } },
  { { { 26, 0, 2 }, { 26, 27, 0 } } },
  { { { 26, 0, 3 }, { 24, 31, 0 } } },
  { { { 26, 0, 4 }, { 25, 30, 1 } } },
  { { { 27, 0, 3 }, { 25, 30, 0 } } },
  { { { 27, 0, 2 }, { 28, 24, 0 } } },
  { { { 27, 0, 1 }, { 25, 31, 1 } } },
  { { { 27, 0, 0 }, { 25, 31, 0 } } },
  { { { 27, 0, 1 }, { 25, 31, 1 } } },
  { { { 27, 0, 2 }, { 26, 30, 1 } } },
  { { { 27, 0, 3 }, { 26, 30, 0 } } },
  { { { 27, 0, 4 }, { 26, 31, 1 } } },
  { { { 28, 0, 4 }, { 26, 31, 0 } } },
  { { { 28, 0, 3 }, { 28, 27, 0 } } },
  { { { 28, 0, 2 }, { 27, 30, 1 } } },
  { { { 28, 0, 1 }, { 27, 30, 0 } } },
  { { { 28, 0, 0 }, { 28, 28, 0 } } },
  { { { 28, 0, 1 }, { 27, 31, 1 } } },
  { { { 28, 0, 2 }, { 27, 31, 0 } } },
  { { { 28, 0, 3 }, { 27, 31, 1 } } },
  { { { 28, 0, 4 }, { 28, 30, 1 } } },
  { { { 29, 0, 3 }, { 28, 30, 0 } } },
  { { { 29, 0, 2 }, { 30, 27, 1 } } },
  { { { 29, 0, 1 }, { 30, 27, 0 } } },
  { { { 29, 0, 0 }, { 28, 31, 0 } } },
  { { { 29, 0, 1 }, { 29, 30, 1 } } },
  { { { 29, 0, 2 }, { 29, 30, 0 } } },
  { { { 29, 0, 3 }, { 29, 30, 1 } } },
  { { { 29, 0, 4 }, { 29, 31, 1 } } },
  { { { 30, 0, 3 }, { 29, 31, 0 } } },
  { { { 30, 0, 2 }, { 29, 31, 1 } } },
  { { { 30, 0, 1 }, { 30, 30, 1 } } },
  { { { 30, 0, 0 }, { 30, 30, 0 } } },
  { { { 30, 0, 1 }, { 30, 31, 1 } } },
  { { { 30, 0, 2 }, { 30, 31, 0 } } },
  { { { 30, 0, 3 }, { 30, 31, 1 } } },
  { { { 30, 0, 4 }, { 31, 30, 1 } } },
  { { { 31, 0, 3 }, { 31, 30, 0 } } },
  { { { 31, 0, 2 }, { 31, 30, 1 } } },
  { { { 31, 0, 1 }, { 31, 31, 1 } } },
  { { { 31, 0, 0 }, { 31, 31, 0 } } }
};
static const DDSSingleColourLookup DDSLookup_6_4[] =
{
{ { { 0, 0, 0 }, { 0, 0, 0 } } },
{ { { 0, 0, 1 }, { 0, 1, 0 } } },
{ { { 0, 0, 2 }, { 0, 2, 0 } } },
{ { { 1, 0, 1 }, { 0, 3, 1 } } },
{ { { 1, 0, 0 }, { 0, 3, 0 } } },
{ { { 1, 0, 1 }, { 0, 4, 0 } } },
{ { { 1, 0, 2 }, { 0, 5, 0 } } },
{ { { 2, 0, 1 }, { 0, 6, 1 } } },
{ { { 2, 0, 0 }, { 0, 6, 0 } } },
{ { { 2, 0, 1 }, { 0, 7, 0 } } },
{ { { 2, 0, 2 }, { 0, 8, 0 } } },
{ { { 3, 0, 1 }, { 0, 9, 1 } } },
{ { { 3, 0, 0 }, { 0, 9, 0 } } },
{ { { 3, 0, 1 }, { 0, 10, 0 } } },
{ { { 3, 0, 2 }, { 0, 11, 0 } } },
{ { { 4, 0, 1 }, { 0, 12, 1 } } },
{ { { 4, 0, 0 }, { 0, 12, 0 } } },
{ { { 4, 0, 1 }, { 0, 13, 0 } } },
{ { { 4, 0, 2 }, { 0, 14, 0 } } },
{ { { 5, 0, 1 }, { 0, 15, 1 } } },
{ { { 5, 0, 0 }, { 0, 15, 0 } } },
{ { { 5, 0, 1 }, { 0, 16, 0 } } },
{ { { 5, 0, 2 }, { 1, 15, 0 } } },
{ { { 6, 0, 1 }, { 0, 17, 0 } } },
{ { { 6, 0, 0 }, { 0, 18, 0 } } },
{ { { 6, 0, 1 }, { 0, 19, 0 } } },
{ { { 6, 0, 2 }, { 3, 14, 0 } } },
{ { { 7, 0, 1 }, { 0, 20, 0 } } },
{ { { 7, 0, 0 }, { 0, 21, 0 } } },
{ { { 7, 0, 1 }, { 0, 22, 0 } } },
{ { { 7, 0, 2 }, { 4, 15, 0 } } },
{ { { 8, 0, 1 }, { 0, 23, 0 } } },
{ { { 8, 0, 0 }, { 0, 24, 0 } } },
{ { { 8, 0, 1 }, { 0, 25, 0 } } },
{ { { 8, 0, 2 }, { 6, 14, 0 } } },
{ { { 9, 0, 1 }, { 0, 26, 0 } } },
{ { { 9, 0, 0 }, { 0, 27, 0 } } },
{ { { 9, 0, 1 }, { 0, 28, 0 } } },
{ { { 9, 0, 2 }, { 7, 15, 0 } } },
{ { { 10, 0, 1 }, { 0, 29, 0 } } },
{ { { 10, 0, 0 }, { 0, 30, 0 } } },
{ { { 10, 0, 1 }, { 0, 31, 0 } } },
{ { { 10, 0, 2 }, { 9, 14, 0 } } },
{ { { 11, 0, 1 }, { 0, 32, 0 } } },
{ { { 11, 0, 0 }, { 0, 33, 0 } } },
{ { { 11, 0, 1 }, { 2, 30, 0 } } },
{ { { 11, 0, 2 }, { 0, 34, 0 } } },
{ { { 12, 0, 1 }, { 0, 35, 0 } } },
{ { { 12, 0, 0 }, { 0, 36, 0 } } },
{ { { 12, 0, 1 }, { 3, 31, 0 } } },
{ { { 12, 0, 2 }, { 0, 37, 0 } } },
{ { { 13, 0, 1 }, { 0, 38, 0 } } },
{ { { 13, 0, 0 }, { 0, 39, 0 } } },
{ { { 13, 0, 1 }, { 5, 30, 0 } } },
{ { { 13, 0, 2 }, { 0, 40, 0 } } },
{ { { 14, 0, 1 }, { 0, 41, 0 } } },
{ { { 14, 0, 0 }, { 0, 42, 0 } } },
{ { { 14, 0, 1 }, { 6, 31, 0 } } },
{ { { 14, 0, 2 }, { 0, 43, 0 } } },
{ { { 15, 0, 1 }, { 0, 44, 0 } } },
{ { { 15, 0, 0 }, { 0, 45, 0 } } },
{ { { 15, 0, 1 }, { 8, 30, 0 } } },
{ { { 15, 0, 2 }, { 0, 46, 0 } } },
{ { { 16, 0, 2 }, { 0, 47, 0 } } },
{ { { 16, 0, 1 }, { 1, 46, 0 } } },
{ { { 16, 0, 0 }, { 0, 48, 0 } } },
{ { { 16, 0, 1 }, { 0, 49, 0 } } },
{ { { 16, 0, 2 }, { 0, 50, 0 } } },
{ { { 17, 0, 1 }, { 2, 47, 0 } } },
{ { { 17, 0, 0 }, { 0, 51, 0 } } },
{ { { 17, 0, 1 }, { 0, 52, 0 } } },
{ { { 17, 0, 2 }, { 0, 53, 0 } } },
{ { { 18, 0, 1 }, { 4, 46, 0 } } },
{ { { 18, 0, 0 }, { 0, 54, 0 } } },
{ { { 18, 0, 1 }, { 0, 55, 0 } } },
{ { { 18, 0, 2 }, { 0, 56, 0 } } },
{ { { 19, 0, 1 }, { 5, 47, 0 } } },
{ { { 19, 0, 0 }, { 0, 57, 0 } } },
{ { { 19, 0, 1 }, { 0, 58, 0 } } },
{ { { 19, 0, 2 }, { 0, 59, 0 } } },
{ { { 20, 0, 1 }, { 7, 46, 0 } } },
{ { { 20, 0, 0 }, { 0, 60, 0 } } },
{ { { 20, 0, 1 }, { 0, 61, 0 } } },
{ { { 20, 0, 2 }, { 0, 62, 0 } } },
{ { { 21, 0, 1 }, { 8, 47, 0 } } },
{ { { 21, 0, 0 }, { 0, 63, 0 } } },
{ { { 21, 0, 1 }, { 1, 62, 0 } } },
{ { { 21, 0, 2 }, { 1, 63, 0 } } },
{ { { 22, 0, 1 }, { 10, 46, 0 } } },
{ { { 22, 0, 0 }, { 2, 62, 0 } } },
{ { { 22, 0, 1 }, { 2, 63, 0 } } },
{ { { 22, 0, 2 }, { 3, 62, 0 } } },
{ { { 23, 0, 1 }, { 11, 47, 0 } } },
{ { { 23, 0, 0 }, { 3, 63, 0 } } },
{ { { 23, 0, 1 }, { 4, 62, 0 } } },
{ { { 23, 0, 2 }, { 4, 63, 0 } } },
{ { { 24, 0, 1 }, { 13, 46, 0 } } },
{ { { 24, 0, 0 }, { 5, 62, 0 } } },
{ { { 24, 0, 1 }, { 5, 63, 0 } } },
{ { { 24, 0, 2 }, { 6, 62, 0 } } },
{ { { 25, 0, 1 }, { 14, 47, 0 } } },
{ { { 25, 0, 0 }, { 6, 63, 0 } } },
{ { { 25, 0, 1 }, { 7, 62, 0 } } },
{ { { 25, 0, 2 }, { 7, 63, 0 } } },
{ { { 26, 0, 1 }, { 16, 45, 0 } } },
{ { { 26, 0, 0 }, { 8, 62, 0 } } },
{ { { 26, 0, 1 }, { 8, 63, 0 } } },
{ { { 26, 0, 2 }, { 9, 62, 0 } } },
{ { { 27, 0, 1 }, { 16, 48, 0 } } },
{ { { 27, 0, 0 }, { 9, 63, 0 } } },
{ { { 27, 0, 1 }, { 10, 62, 0 } } },
{ { { 27, 0, 2 }, { 10, 63, 0 } } },
{ { { 28, 0, 1 }, { 16, 51, 0 } } },
{ { { 28, 0, 0 }, { 11, 62, 0 } } },
{ { { 28, 0, 1 }, { 11, 63, 0 } } },
{ { { 28, 0, 2 }, { 12, 62, 0 } } },
{ { { 29, 0, 1 }, { 16, 54, 0 } } },
{ { { 29, 0, 0 }, { 12, 63, 0 } } },
{ { { 29, 0, 1 }, { 13, 62, 0 } } },
{ { { 29, 0, 2 }, { 13, 63, 0 } } },
{ { { 30, 0, 1 }, { 16, 57, 0 } } },
{ { { 30, 0, 0 }, { 14, 62, 0 } } },
{ { { 30, 0, 1 }, { 14, 63, 0 } } },
{ { { 30, 0, 2 }, { 15, 62, 0 } } },
{ { { 31, 0, 1 }, { 16, 60, 0 } } },
{ { { 31, 0, 0 }, { 15, 63, 0 } } },
{ { { 31, 0, 1 }, { 24, 46, 0 } } },
{ { { 31, 0, 2 }, { 16, 62, 0 } } },
{ { { 32, 0, 2 }, { 16, 63, 0 } } },
{ { { 32, 0, 1 }, { 17, 62, 0 } } },
{ { { 32, 0, 0 }, { 25, 47, 0 } } },
{ { { 32, 0, 1 }, { 17, 63, 0 } } },
{ { { 32, 0, 2 }, { 18, 62, 0 } } },
{ { { 33, 0, 1 }, { 18, 63, 0 } } },
{ { { 33, 0, 0 }, { 27, 46, 0 } } },
{ { { 33, 0, 1 }, { 19, 62, 0 } } },
{ { { 33, 0, 2 }, { 19, 63, 0 } } },
{ { { 34, 0, 1 }, { 20, 62, 0 } } },
{ { { 34, 0, 0 }, { 28, 47, 0 } } },
{ { { 34, 0, 1 }, { 20, 63, 0 } } },
{ { { 34, 0, 2 }, { 21, 62, 0 } } },
{ { { 35, 0, 1 }, { 21, 63, 0 } } },
{ { { 35, 0, 0 }, { 30, 46, 0 } } },
{ { { 35, 0, 1 }, { 22, 62, 0 } } },
{ { { 35, 0, 2 }, { 22, 63, 0 } } },
{ { { 36, 0, 1 }, { 23, 62, 0 } } },
{ { { 36, 0, 0 }, { 31, 47, 0 } } },
{ { { 36, 0, 1 }, { 23, 63, 0 } } },
{ { { 36, 0, 2 }, { 24, 62, 0 } } },
{ { { 37, 0, 1 }, { 24, 63, 0 } } },
{ { { 37, 0, 0 }, { 32, 47, 0 } } },
{ { { 37, 0, 1 }, { 25, 62, 0 } } },
{ { { 37, 0, 2 }, { 25, 63, 0 } } },
{ { { 38, 0, 1 }, { 26, 62, 0 } } },
{ { { 38, 0, 0 }, { 32, 50, 0 } } },
{ { { 38, 0, 1 }, { 26, 63, 0 } } },
{ { { 38, 0, 2 }, { 27, 62, 0 } } },
{ { { 39, 0, 1 }, { 27, 63, 0 } } },
{ { { 39, 0, 0 }, { 32, 53, 0 } } },
{ { { 39, 0, 1 }, { 28, 62, 0 } } },
{ { { 39, 0, 2 }, { 28, 63, 0 } } },
{ { { 40, 0, 1 }, { 29, 62, 0 } } },
{ { { 40, 0, 0 }, { 32, 56, 0 } } },
{ { { 40, 0, 1 }, { 29, 63, 0 } } },
{ { { 40, 0, 2 }, { 30, 62, 0 } } },
{ { { 41, 0, 1 }, { 30, 63, 0 } } },
{ { { 41, 0, 0 }, { 32, 59, 0 } } },
{ { { 41, 0, 1 }, { 31, 62, 0 } } },
{ { { 41, 0, 2 }, { 31, 63, 0 } } },
{ { { 42, 0, 1 }, { 32, 61, 0 } } },
{ { { 42, 0, 0 }, { 32, 62, 0 } } },
{ { { 42, 0, 1 }, { 32, 63, 0 } } },
{ { { 42, 0, 2 }, { 41, 46, 0 } } },
{ { { 43, 0, 1 }, { 33, 62, 0 } } },
{ { { 43, 0, 0 }, { 33, 63, 0 } } },
{ { { 43, 0, 1 }, { 34, 62, 0 } } },
{ { { 43, 0, 2 }, { 42, 47, 0 } } },
{ { { 44, 0, 1 }, { 34, 63, 0 } } },
{ { { 44, 0, 0 }, { 35, 62, 0 } } },
{ { { 44, 0, 1 }, { 35, 63, 0 } } },
{ { { 44, 0, 2 }, { 44, 46, 0 } } },
{ { { 45, 0, 1 }, { 36, 62, 0 } } },
{ { { 45, 0, 0 }, { 36, 63, 0 } } },
{ { { 45, 0, 1 }, { 37, 62, 0 } } },
{ { { 45, 0, 2 }, { 45, 47, 0 } } },
{ { { 46, 0, 1 }, { 37, 63, 0 } } },
{ { { 46, 0, 0 }, { 38, 62, 0 } } },
{ { { 46, 0, 1 }, { 38, 63, 0 } } },
{ { { 46, 0, 2 }, { 47, 46, 0 } } },
{ { { 47, 0, 1 }, { 39, 62, 0 } } },
{ { { 47, 0, 0 }, { 39, 63, 0 } } },
{ { { 47, 0, 1 }, { 40, 62, 0 } } },
{ { { 47, 0, 2 }, { 48, 46, 0 } } },
{ { { 48, 0, 2 }, { 40, 63, 0 } } },
{ { { 48, 0, 1 }, { 41, 62, 0 } } },
{ { { 48, 0, 0 }, { 41, 63, 0 } } },
{ { { 48, 0, 1 }, { 48, 49, 0 } } },
{ { { 48, 0, 2 }, { 42, 62, 0 } } },
{ { { 49, 0, 1 }, { 42, 63, 0 } } },
{ { { 49, 0, 0 }, { 43, 62, 0 } } },
{ { { 49, 0, 1 }, { 48, 52, 0 } } },
{ { { 49, 0, 2 }, { 43, 63, 0 } } },
{ { { 50, 0, 1 }, { 44, 62, 0 } } },
{ { { 50, 0, 0 }, { 44, 63, 0 } } },
{ { { 50, 0, 1 }, { 48, 55, 0 } } },
{ { { 50, 0, 2 }, { 45, 62, 0 } } },
{ { { 51, 0, 1 }, { 45, 63, 0 } } },
{ { { 51, 0, 0 }, { 46, 62, 0 } } },
{ { { 51, 0, 1 }, { 48, 58, 0 } } },
{ { { 51, 0, 2 }, { 46, 63, 0 } } },
{ { { 52, 0, 1 }, { 47, 62, 0 } } },
{ { { 52, 0, 0 }, { 47, 63, 0 } } },
{ { { 52, 0, 1 }, { 48, 61, 0 } } },
{ { { 52, 0, 2 }, { 48, 62, 0 } } },
{ { { 53, 0, 1 }, { 56, 47, 0 } } },
{ { { 53, 0, 0 }, { 48, 63, 0 } } },
{ { { 53, 0, 1 }, { 49, 62, 0 } } },
{ { { 53, 0, 2 }, { 49, 63, 0 } } },
{ { { 54, 0, 1 }, { 58, 46, 0 } } },
{ { { 54, 0, 0 }, { 50, 62, 0 } } },
{ { { 54, 0, 1 }, { 50, 63, 0 } } },
{ { { 54, 0, 2 }, { 51, 62, 0 } } },
{ { { 55, 0, 1 }, { 59, 47, 0 } } },
{ { { 55, 0, 0 }, { 51, 63, 0 } } },
{ { { 55, 0, 1 }, { 52, 62, 0 } } },
{ { { 55, 0, 2 }, { 52, 63, 0 } } },
{ { { 56, 0, 1 }, { 61, 46, 0 } } },
{ { { 56, 0, 0 }, { 53, 62, 0 } } },
{ { { 56, 0, 1 }, { 53, 63, 0 } } },
{ { { 56, 0, 2 }, { 54, 62, 0 } } },
{ { { 57, 0, 1 }, { 62, 47, 0 } } },
{ { { 57, 0, 0 }, { 54, 63, 0 } } },
{ { { 57, 0, 1 }, { 55, 62, 0 } } },
{ { { 57, 0, 2 }, { 55, 63, 0 } } },
{ { { 58, 0, 1 }, { 56, 62, 1 } } },
{ { { 58, 0, 0 }, { 56, 62, 0 } } },
{ { { 58, 0, 1 }, { 56, 63, 0 } } },
{ { { 58, 0, 2 }, { 57, 62, 0 } } },
{ { { 59, 0, 1 }, { 57, 63, 1 } } },
{ { { 59, 0, 0 }, { 57, 63, 0 } } },
{ { { 59, 0, 1 }, { 58, 62, 0 } } },
{ { { 59, 0, 2 }, { 58, 63, 0 } } },
{ { { 60, 0, 1 }, { 59, 62, 1 } } },
{ { { 60, 0, 0 }, { 59, 62, 0 } } },
{ { { 60, 0, 1 }, { 59, 63, 0 } } },
{ { { 60, 0, 2 }, { 60, 62, 0 } } },
{ { { 61, 0, 1 }, { 60, 63, 1 } } },
{ { { 61, 0, 0 }, { 60, 63, 0 } } },
{ { { 61, 0, 1 }, { 61, 62, 0 } } },
{ { { 61, 0, 2 }, { 61, 63, 0 } } },
{ { { 62, 0, 1 }, { 62, 62, 1 } } },
{ { { 62, 0, 0 }, { 62, 62, 0 } } },
{ { { 62, 0, 1 }, { 62, 63, 0 } } },
{ { { 62, 0, 2 }, { 63, 62, 0 } } },
{ { { 63, 0, 1 }, { 63, 63, 1 } } },
{ { { 63, 0, 0 }, { 63, 63, 0 } } }
};
/*
  Per-channel single-colour lookup tables, indexed R, G, B.  In RGB565 the
  red and blue channels are 5 bits wide and share the 5-bit table; green is
  6 bits wide and uses the 6-bit table.
*/
static const DDSSingleColourLookup*
  DDS_LOOKUP[] =
{
  DDSLookup_5_4,
  DDSLookup_6_4,
  DDSLookup_5_4
};
/*
Macros
*/
#define C565_r(x) (((x) & 0xF800) >> 11)
#define C565_g(x) (((x) & 0x07E0) >> 5)
#define C565_b(x) ((x) & 0x001F)
#define C565_red(x) ( (C565_r(x) << 3 | C565_r(x) >> 2))
#define C565_green(x) ( (C565_g(x) << 2 | C565_g(x) >> 4))
#define C565_blue(x) ( (C565_b(x) << 3 | C565_b(x) >> 2))
#define DIV2(x) ((x) > 1 ? ((x) >> 1) : 1)
#define FixRange(min, max, steps) \
if (min > max) \
min = max; \
if ((ssize_t) max - min < steps) \
max = MagickMin(min + steps, 255); \
if ((ssize_t) max - min < steps) \
min = MagickMax(0, (ssize_t) max - steps)
#define Dot(left, right) (left.x*right.x) + (left.y*right.y) + (left.z*right.z)
#define VectorInit(vector, value) vector.x = vector.y = vector.z = vector.w \
= value
#define VectorInit3(vector, value) vector.x = vector.y = vector.z = value
#define IsBitMask(mask, r, g, b, a) (mask.r_bitmask == r && mask.g_bitmask == \
g && mask.b_bitmask == b && mask.alpha_bitmask == a)
/*
  Forward declarations
*/
/* Status-returning helpers: header parsing, the per-format block decoders,
   mipmap skipping, and the DDS writer entry points. */
static MagickBooleanType
  ConstructOrdering(const size_t,const DDSVector4 *,const DDSVector3,
    DDSVector4 *,DDSVector4 *,unsigned char *,size_t),
  ReadDDSInfo(Image *,DDSInfo *),
  ReadDXT1(Image *,DDSInfo *,ExceptionInfo *),
  ReadDXT3(Image *,DDSInfo *,ExceptionInfo *),
  ReadDXT5(Image *,DDSInfo *,ExceptionInfo *),
  ReadUncompressedRGB(Image *,DDSInfo *,ExceptionInfo *),
  ReadUncompressedRGBA(Image *,DDSInfo *,ExceptionInfo *),
  SkipDXTMipmaps(Image *,DDSInfo *,int,ExceptionInfo *),
  SkipRGBMipmaps(Image *,DDSInfo *,int,ExceptionInfo *),
  WriteDDSImage(const ImageInfo *,Image *),
  WriteMipmaps(Image *,const size_t,const size_t,const size_t,
    const MagickBooleanType,const MagickBooleanType,ExceptionInfo *);

/* void helpers used by the DXT encoder/writer. */
static void
  RemapIndices(const ssize_t *,const unsigned char *,unsigned char *),
  WriteDDSInfo(Image *,const size_t,const size_t,const size_t),
  WriteFourCC(Image *,const size_t,const MagickBooleanType,
    const MagickBooleanType,ExceptionInfo *),
  WriteImageData(Image *,const size_t,const size_t,const MagickBooleanType,
    const MagickBooleanType,ExceptionInfo *),
  WriteIndices(Image *,const DDSVector3,const DDSVector3, unsigned char *),
  WriteSingleColorFit(Image *,const DDSVector4 *,const ssize_t *),
  WriteUncompressed(Image *,ExceptionInfo *);
static inline void VectorAdd(const DDSVector4 p, const DDSVector4 q,
  DDSVector4 *sum)
{
  /* Component-wise 4-vector addition: *sum = p + q. */
  sum->w = p.w + q.w;
  sum->z = p.z + q.z;
  sum->y = p.y + q.y;
  sum->x = p.x + q.x;
}
static inline void VectorClamp(DDSVector4 *v)
{
  /* Clamp every component of *v into [0,1], in place. */
  v->w = MagickMin(1.0f,MagickMax(0.0f,v->w));
  v->z = MagickMin(1.0f,MagickMax(0.0f,v->z));
  v->y = MagickMin(1.0f,MagickMax(0.0f,v->y));
  v->x = MagickMin(1.0f,MagickMax(0.0f,v->x));
}
static inline void VectorClamp3(DDSVector3 *v)
{
  /* Clamp the three components of *v into [0,1], in place. */
  v->z = MagickMin(1.0f,MagickMax(0.0f,v->z));
  v->y = MagickMin(1.0f,MagickMax(0.0f,v->y));
  v->x = MagickMin(1.0f,MagickMax(0.0f,v->x));
}
static inline void VectorCopy43(const DDSVector4 src,
  DDSVector3 *dst)
{
  /* Copy x/y/z of a 4-vector into a 3-vector; w is dropped. */
  dst->z = src.z;
  dst->y = src.y;
  dst->x = src.x;
}
static inline void VectorCopy44(const DDSVector4 src,
  DDSVector4 *dst)
{
  /* Copy all four components of src into *dst. */
  dst->w = src.w;
  dst->z = src.z;
  dst->y = src.y;
  dst->x = src.x;
}
static inline void VectorNegativeMultiplySubtract(const DDSVector4 a,
  const DDSVector4 b, const DDSVector4 c, DDSVector4 *out)
{
  /* Component-wise "nmsub": *out = c - a*b. */
  out->w = c.w - (a.w * b.w);
  out->z = c.z - (a.z * b.z);
  out->y = c.y - (a.y * b.y);
  out->x = c.x - (a.x * b.x);
}
static inline void VectorMultiply(const DDSVector4 p,
  const DDSVector4 q, DDSVector4 *product)
{
  /* Component-wise (Hadamard) 4-vector product: *product = p * q. */
  product->w = p.w * q.w;
  product->z = p.z * q.z;
  product->y = p.y * q.y;
  product->x = p.x * q.x;
}
static inline void VectorMultiply3(const DDSVector3 p,
  const DDSVector3 q, DDSVector3 *product)
{
  /* Component-wise (Hadamard) 3-vector product: *product = p * q. */
  product->z = p.z * q.z;
  product->y = p.y * q.y;
  product->x = p.x * q.x;
}
static inline void VectorMultiplyAdd(const DDSVector4 a, const DDSVector4 b,
  const DDSVector4 c, DDSVector4 *out)
{
  /* Component-wise multiply-add: *out = a*b + c. */
  out->w = (a.w * b.w) + c.w;
  out->z = (a.z * b.z) + c.z;
  out->y = (a.y * b.y) + c.y;
  out->x = (a.x * b.x) + c.x;
}
static inline void VectorMultiplyAdd3(const DDSVector3 a, const DDSVector3 b,
  const DDSVector3 c, DDSVector3 *out)
{
  /* Component-wise multiply-add for 3-vectors: *out = a*b + c. */
  out->z = (a.z * b.z) + c.z;
  out->y = (a.y * b.y) + c.y;
  out->x = (a.x * b.x) + c.x;
}
static inline void VectorReciprocal(const DDSVector4 v,
  DDSVector4 *out)
{
  /* Component-wise reciprocal: *out = 1/v.  No zero guard -- a zero
     component yields infinity, exactly as the original arithmetic does. */
  out->w = 1.0f / v.w;
  out->z = 1.0f / v.z;
  out->y = 1.0f / v.y;
  out->x = 1.0f / v.x;
}
static inline void VectorSubtract(const DDSVector4 p,
  const DDSVector4 q, DDSVector4 *diff)
{
  /* Component-wise 4-vector difference: *diff = p - q. */
  diff->w = p.w - q.w;
  diff->z = p.z - q.z;
  diff->y = p.y - q.y;
  diff->x = p.x - q.x;
}
static inline void VectorSubtract3(const DDSVector3 p,
  const DDSVector3 q, DDSVector3 *diff)
{
  /* Component-wise 3-vector difference: *diff = p - q. */
  diff->z = p.z - q.z;
  diff->y = p.y - q.y;
  diff->x = p.x - q.x;
}
static inline void VectorTruncate(DDSVector4 *v)
{
  /* Round each component toward zero: floor for positives, ceil
     otherwise. */
  v->w = v->w > 0.0f ? floor(v->w) : ceil(v->w);
  v->z = v->z > 0.0f ? floor(v->z) : ceil(v->z);
  v->y = v->y > 0.0f ? floor(v->y) : ceil(v->y);
  v->x = v->x > 0.0f ? floor(v->x) : ceil(v->x);
}
static inline void VectorTruncate3(DDSVector3 *v)
{
  /* Round each of the three components toward zero. */
  v->z = v->z > 0.0f ? floor(v->z) : ceil(v->z);
  v->y = v->y > 0.0f ? floor(v->y) : ceil(v->y);
  v->x = v->x > 0.0f ? floor(v->x) : ceil(v->x);
}
static void CalculateColors(unsigned short c0, unsigned short c1,
  DDSColors *c, MagickBooleanType ignoreAlpha)
{
  /*
    Decode the two RGB565 block endpoints into 8-bit channels and derive
    the two intermediate palette entries.  When alpha is honoured and
    c0 <= c1 the block is in three-colour mode: the midpoint plus a
    transparent-black fourth entry; otherwise two interpolants at 1/3 and
    2/3 are used.
  */
  c->a[0] = 0;
  c->a[1] = 0;
  c->a[2] = 0;
  c->a[3] = 0;

  c->r[0] = (unsigned char) C565_red(c0);
  c->g[0] = (unsigned char) C565_green(c0);
  c->b[0] = (unsigned char) C565_blue(c0);

  c->r[1] = (unsigned char) C565_red(c1);
  c->g[1] = (unsigned char) C565_green(c1);
  c->b[1] = (unsigned char) C565_blue(c1);

  if ((ignoreAlpha == MagickFalse) && (c0 <= c1))
    {
      /* Three-colour mode. */
      c->r[2] = (unsigned char) ((c->r[0] + c->r[1]) / 2);
      c->g[2] = (unsigned char) ((c->g[0] + c->g[1]) / 2);
      c->b[2] = (unsigned char) ((c->b[0] + c->b[1]) / 2);

      c->r[3] = 0;
      c->g[3] = 0;
      c->b[3] = 0;
      c->a[3] = 255;
    }
  else
    {
      /* Four-colour mode. */
      c->r[2] = (unsigned char) ((2 * c->r[0] + c->r[1]) / 3);
      c->g[2] = (unsigned char) ((2 * c->g[0] + c->g[1]) / 3);
      c->b[2] = (unsigned char) ((2 * c->b[0] + c->b[1]) / 3);

      c->r[3] = (unsigned char) ((c->r[0] + 2 * c->r[1]) / 3);
      c->g[3] = (unsigned char) ((c->g[0] + 2 * c->g[1]) / 3);
      c->b[3] = (unsigned char) ((c->b[0] + 2 * c->b[1]) / 3);
    }
}
static size_t CompressAlpha(const size_t min, const size_t max,
  const size_t steps, const ssize_t *alphas, unsigned char* indices)
{
  /*
    Quantize 16 alpha samples against a DXT5 alpha palette built from the
    endpoints [min,max] with `steps` interpolated levels, writing the chosen
    palette index for each sample to indices[] and returning the total
    squared quantization error.  A sample of -1 marks an unused texel and is
    assigned index 0 with no error contribution.
  */
  ssize_t
    i;

  size_t
    error,
    index,
    j,
    least,
    value;

  unsigned char
    codes[8];

  codes[0] = (unsigned char) min;
  codes[1] = (unsigned char) max;
  /* Entries 6 and 7 default to the explicit 0/255 of 6-level mode; in
     8-level mode (steps == 7) the loop below overwrites them. */
  codes[6] = 0;
  codes[7] = 255;
  for (i=1; i < (ssize_t) steps; i++)
    codes[i+1] = (unsigned char) (((steps-i)*min + i*max) / steps);

  error = 0;
  for (i=0; i<16; i++)
  {
    if (alphas[i] == -1)
      {
        indices[i] = 0;
        continue;
      }

    /* Nearest palette entry by squared distance.  The subtraction may wrap
       (unsigned), but squaring modulo 2^N recovers the true squared
       difference, so the comparison is still correct. */
    value = alphas[i];
    least = SIZE_MAX;
    index = 0;
    for (j=0; j<8; j++)
    {
      size_t
        dist;

      dist = value - (size_t)codes[j];
      dist *= dist;

      if (dist < least)
        {
          least = dist;
          index = j;
        }
    }

    indices[i] = (unsigned char)index;
    error += least;
  }
  return error;
}
/*
  Exhaustive "cluster fit" DXT endpoint search: orders the block's points
  along the principle axis, then for every partition of the ordered points
  into the four palette weights (1, 2/3, 1/3, 0) solves the least-squares
  endpoints in closed form and keeps the pair with the lowest
  metric-weighted error.  Up to 8 re-ordering iterations are tried along
  the current best axis.  Results: *start, *end and the per-texel indices.
*/
static void CompressClusterFit(const size_t count,
  const DDSVector4 *points, const ssize_t *map, const DDSVector3 principle,
  const DDSVector4 metric, DDSVector3 *start, DDSVector3 *end,
  unsigned char *indices)
{
  DDSVector3
    axis;

  DDSVector4
    grid,
    gridrcp,
    half,
    onethird_onethird2,
    pointsWeights[16],
    two,
    twonineths,
    twothirds_twothirds2,
    xSumwSum;

  float
    bestError = 1e+37f;

  size_t
    bestIteration = 0,
    besti = 0,
    bestj = 0,
    bestk = 0,
    iterationIndex;

  ssize_t
    i;

  unsigned char
    *o,
    order[128],
    unordered[16];

  /* Precomputed interpolation weights and their squares (the .w slot
     carries the squared weight used in the normal equations). */
  VectorInit(half,0.5f);
  VectorInit(two,2.0f);

  VectorInit(onethird_onethird2,1.0f/3.0f);
  onethird_onethird2.w = 1.0f/9.0f;
  VectorInit(twothirds_twothirds2,2.0f/3.0f);
  twothirds_twothirds2.w = 4.0f/9.0f;
  VectorInit(twonineths,2.0f/9.0f);

  /* RGB565 quantization grid (5/6/5 levels per channel). */
  grid.x = 31.0f;
  grid.y = 63.0f;
  grid.z = 31.0f;
  grid.w = 0.0f;

  gridrcp.x = 1.0f/31.0f;
  gridrcp.y = 1.0f/63.0f;
  gridrcp.z = 1.0f/31.0f;
  gridrcp.w = 0.0f;

  xSumwSum.x = 0.0f;
  xSumwSum.y = 0.0f;
  xSumwSum.z = 0.0f;
  xSumwSum.w = 0.0f;

  ConstructOrdering(count,points,principle,pointsWeights,&xSumwSum,order,0);

  for (iterationIndex = 0;;)
  {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,1) \
    num_threads(GetMagickResourceLimit(ThreadResource))
#endif
  /* i/j/k split the ordered points into the four weight groups:
     [0,i) -> 1, [i,j) -> 2/3, [j,k) -> 1/3, [k,count) -> 0. */
  for (i=0; i < (ssize_t) count; i++)
  {
    DDSVector4
      part0,
      part1,
      part2;

    size_t
      ii,
      j,
      k,
      kmin;

    VectorInit(part0,0.0f);
    for(ii=0; ii < (size_t) i; ii++)
      VectorAdd(pointsWeights[ii],part0,&part0);

    VectorInit(part1,0.0f);

    for (j=(size_t) i;;)
    {
      if (j == 0)
        {
          VectorCopy44(pointsWeights[0],&part2);
          kmin = 1;
        }
        else
        {
          VectorInit(part2,0.0f);
          kmin = j;
        }

      for (k=kmin;;)
      {
        DDSVector4
          a,
          alpha2_sum,
          alphax_sum,
          alphabeta_sum,
          b,
          beta2_sum,
          betax_sum,
          e1,
          e2,
          factor,
          part3;

        float
          error;

        /* part3 is the weight-0 group: the remainder of the totals. */
        VectorSubtract(xSumwSum,part2,&part3);
        VectorSubtract(part3,part1,&part3);
        VectorSubtract(part3,part0,&part3);

        /* Accumulate the alpha/beta sums of the 2x2 normal equations. */
        VectorMultiplyAdd(part1,twothirds_twothirds2,part0,&alphax_sum);
        VectorMultiplyAdd(part2,onethird_onethird2,alphax_sum,&alphax_sum);
        VectorInit(alpha2_sum,alphax_sum.w);

        VectorMultiplyAdd(part2,twothirds_twothirds2,part3,&betax_sum);
        VectorMultiplyAdd(part1,onethird_onethird2,betax_sum,&betax_sum);
        VectorInit(beta2_sum,betax_sum.w);

        VectorAdd(part1,part2,&alphabeta_sum);
        VectorInit(alphabeta_sum,alphabeta_sum.w);
        VectorMultiply(twonineths,alphabeta_sum,&alphabeta_sum);

        /* Solve for the least-squares endpoints a (start) and b (end). */
        VectorMultiply(alpha2_sum,beta2_sum,&factor);
        VectorNegativeMultiplySubtract(alphabeta_sum,alphabeta_sum,factor,
          &factor);
        VectorReciprocal(factor,&factor);

        VectorMultiply(alphax_sum,beta2_sum,&a);
        VectorNegativeMultiplySubtract(betax_sum,alphabeta_sum,a,&a);
        VectorMultiply(a,factor,&a);

        VectorMultiply(betax_sum,alpha2_sum,&b);
        VectorNegativeMultiplySubtract(alphax_sum,alphabeta_sum,b,&b);
        VectorMultiply(b,factor,&b);

        /* Snap both endpoints to the RGB565 grid. */
        VectorClamp(&a);
        VectorMultiplyAdd(grid,a,half,&a);
        VectorTruncate(&a);
        VectorMultiply(a,gridrcp,&a);

        VectorClamp(&b);
        VectorMultiplyAdd(grid,b,half,&b);
        VectorTruncate(&b);
        VectorMultiply(b,gridrcp,&b);

        /* Metric-weighted error of this partition/endpoint pair. */
        VectorMultiply(b,b,&e1);
        VectorMultiply(e1,beta2_sum,&e1);

        VectorMultiply(a,a,&e2);
        VectorMultiplyAdd(e2,alpha2_sum,e1,&e1);

        VectorMultiply(a,b,&e2);
        VectorMultiply(e2,alphabeta_sum,&e2);
        VectorNegativeMultiplySubtract(a,alphax_sum,e2,&e2);
        VectorNegativeMultiplySubtract(b,betax_sum,e2,&e2);

        VectorMultiplyAdd(two,e2,e1,&e2);
        VectorMultiply(e2,metric,&e2);

        error = e2.x + e2.y + e2.z;

        if (error < bestError)
          {
            /* Double-checked under the critical section so concurrent
               improvements are not lost. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp critical (DDS_CompressClusterFit)
#endif
            {
              if (error < bestError)
                {
                  VectorCopy43(a,start);
                  VectorCopy43(b,end);
                  bestError = error;
                  besti = i;
                  bestj = j;
                  bestk = k;
                  bestIteration = iterationIndex;
                }
            }
          }

        if (k == count)
          break;

        VectorAdd(pointsWeights[k],part2,&part2);
        k++;
      }

      if (j == count)
        break;

      VectorAdd(pointsWeights[j],part1,&part1);
      j++;
    }
  }

  /* Stop when an iteration produced no improvement, after 8 iterations,
     or when the new ordering repeats a previous one. */
  if (bestIteration != iterationIndex)
    break;

  iterationIndex++;
  if (iterationIndex == 8)
    break;

  VectorSubtract3(*end,*start,&axis);
  if (ConstructOrdering(count,points,axis,pointsWeights,&xSumwSum,order,
    iterationIndex) == MagickFalse)
    break;
  }

  /* Translate the winning partition back to per-texel palette indices. */
  o = order + (16*bestIteration);

  for (i=0; i < (ssize_t) besti; i++)
    unordered[o[i]] = 0;
  for (i=besti; i < (ssize_t) bestj; i++)
    unordered[o[i]] = 2;
  for (i=bestj; i < (ssize_t) bestk; i++)
    unordered[o[i]] = 3;
  for (i=bestk; i < (ssize_t) count; i++)
    unordered[o[i]] = 1;

  RemapIndices(map,unordered,indices);
}
/*
  Fast "range fit" DXT endpoint search: projects the points onto the
  principle axis, takes the extreme points as endpoints, snaps them to the
  RGB565 grid, and assigns each point the nearest of the four derived
  palette colours (metric-weighted distance).  Cheaper but coarser than
  CompressClusterFit.
*/
static void CompressRangeFit(const size_t count,
  const DDSVector4 *points, const ssize_t *map, const DDSVector3 principle,
  const DDSVector4 metric, DDSVector3 *start, DDSVector3 *end,
  unsigned char *indices)
{
  float
    d,
    bestDist,
    max,
    min,
    val;

  DDSVector3
    codes[4],
    grid,
    gridrcp,
    half,
    dist;

  register ssize_t
    i;

  size_t
    bestj,
    j;

  unsigned char
    closest[16];

  VectorInit3(half,0.5f);

  /* RGB565 quantization grid (5/6/5 levels per channel). */
  grid.x = 31.0f;
  grid.y = 63.0f;
  grid.z = 31.0f;

  gridrcp.x = 1.0f/31.0f;
  gridrcp.y = 1.0f/63.0f;
  gridrcp.z = 1.0f/31.0f;

  if (count > 0)
    {
      /* Endpoints = points with the min/max projection onto the axis. */
      VectorCopy43(points[0],start);
      VectorCopy43(points[0],end);

      min = max = Dot(points[0],principle);
      for (i=1; i < (ssize_t) count; i++)
      {
        val = Dot(points[i],principle);
        if (val < min)
          {
            VectorCopy43(points[i],start);
            min = val;
          }
        else if (val > max)
          {
            VectorCopy43(points[i],end);
            max = val;
          }
      }
    }

  /* Snap both endpoints to the RGB565 grid. */
  VectorClamp3(start);
  VectorMultiplyAdd3(grid,*start,half,start);
  VectorTruncate3(start);
  VectorMultiply3(*start,gridrcp,start);

  VectorClamp3(end);
  VectorMultiplyAdd3(grid,*end,half,end);
  VectorTruncate3(end);
  VectorMultiply3(*end,gridrcp,end);

  /* Four-colour palette: endpoints plus 1/3 and 2/3 interpolants. */
  codes[0] = *start;
  codes[1] = *end;
  codes[2].x = (start->x * (2.0f/3.0f)) + (end->x * (1.0f/3.0f));
  codes[2].y = (start->y * (2.0f/3.0f)) + (end->y * (1.0f/3.0f));
  codes[2].z = (start->z * (2.0f/3.0f)) + (end->z * (1.0f/3.0f));
  codes[3].x = (start->x * (1.0f/3.0f)) + (end->x * (2.0f/3.0f));
  codes[3].y = (start->y * (1.0f/3.0f)) + (end->y * (2.0f/3.0f));
  codes[3].z = (start->z * (1.0f/3.0f)) + (end->z * (2.0f/3.0f));

  /* Assign each point the nearest palette entry (weighted by metric). */
  for (i=0; i < (ssize_t) count; i++)
  {
    bestDist = 1e+37f;
    bestj = 0;
    for (j=0; j < 4; j++)
    {
      dist.x = (points[i].x - codes[j].x) * metric.x;
      dist.y = (points[i].y - codes[j].y) * metric.y;
      dist.z = (points[i].z - codes[j].z) * metric.z;

      d = Dot(dist,dist);
      if (d < bestDist)
        {
          bestDist = d;
          bestj = j;
        }
    }

    closest[i] = (unsigned char) bestj;
  }

  RemapIndices(map, closest, indices);
}
/*
  Pick the best precomputed endpoint pair for a single-colour block: try
  both table entries for each of the three channels and keep the candidate
  with the smallest summed squared channel error.  Writes *start, *end
  (normalised to [0,1]) and the palette *index to use.
*/
static void ComputeEndPoints(const DDSSingleColourLookup *lookup[],
  const unsigned char *color, DDSVector3 *start, DDSVector3 *end,
  unsigned char *index)
{
  register ssize_t
    i;

  size_t
    c,
    maxError = SIZE_MAX;

  for (i=0; i < 2; i++)
  {
    const DDSSourceBlock*
      sources[3];

    size_t
      error = 0;

    for (c=0; c < 3; c++)
    {
      sources[c] = &lookup[c][color[c]].sources[i];
      error += ((size_t) sources[c]->error) * ((size_t) sources[c]->error);
    }

    /* Skip this candidate when it is strictly worse than the best so
       far (maxError starts at SIZE_MAX so the first pass always wins). */
    if (error > maxError)
      continue;

    /* Endpoint levels are stored against the 5/6/5-bit channel grids. */
    start->x = (float) sources[0]->start / 31.0f;
    start->y = (float) sources[1]->start / 63.0f;
    start->z = (float) sources[2]->start / 31.0f;

    end->x = (float) sources[0]->end / 31.0f;
    end->y = (float) sources[1]->end / 63.0f;
    end->z = (float) sources[2]->end / 31.0f;

    *index = (unsigned char) (2*i);
    maxError = error;
  }
}
static void ComputePrincipleComponent(const float *covariance,
  DDSVector3 *principle)
{
  DDSVector4
    estimate,
    row0,
    row1,
    row2;

  ssize_t
    iter;

  /*
    Approximate the dominant eigenvector of the symmetric 3x3 covariance
    matrix (given as its six unique entries) by eight rounds of power
    iteration; the result is the principle axis of the colour cloud.
  */
  row0.x = covariance[0];
  row0.y = covariance[1];
  row0.z = covariance[2];
  row0.w = 0.0f;

  row1.x = covariance[1];
  row1.y = covariance[3];
  row1.z = covariance[4];
  row1.w = 0.0f;

  row2.x = covariance[2];
  row2.y = covariance[4];
  row2.z = covariance[5];
  row2.w = 0.0f;

  VectorInit(estimate,1.0f);

  for (iter=0; iter < 8; iter++)
  {
    DDSVector4
      product;

    float
      scale;

    /* product = M * estimate, accumulated one matrix row at a time. */
    product.x = row0.x * estimate.x;
    product.y = row0.y * estimate.x;
    product.z = row0.z * estimate.x;
    product.w = row0.w * estimate.x;

    product.x = (row1.x * estimate.y) + product.x;
    product.y = (row1.y * estimate.y) + product.y;
    product.z = (row1.z * estimate.y) + product.z;
    product.w = (row1.w * estimate.y) + product.w;

    product.x = (row2.x * estimate.z) + product.x;
    product.y = (row2.y * estimate.z) + product.y;
    product.z = (row2.z * estimate.z) + product.z;
    product.w = (row2.w * estimate.z) + product.w;

    /* Rescale by the largest component so the iterate stays bounded. */
    scale = (float) PerceptibleReciprocal(MagickMax(product.x,
      MagickMax(product.y,product.z)));

    estimate.x = product.x * scale;
    estimate.y = product.y * scale;
    estimate.z = product.z * scale;
    estimate.w = product.w * scale;
  }

  VectorCopy43(estimate,principle);
}
static void ComputeWeightedCovariance(const size_t count,
const DDSVector4 *points, float *covariance)
{
DDSVector3
centroid;
float
total;
size_t
i;
total = 0.0f;
VectorInit3(centroid,0.0f);
for (i=0; i < count; i++)
{
total += points[i].w;
centroid.x += (points[i].x * points[i].w);
centroid.y += (points[i].y * points[i].w);
centroid.z += (points[i].z * points[i].w);
}
if( total > 1.192092896e-07F)
{
centroid.x /= total;
centroid.y /= total;
centroid.z /= total;
}
for (i=0; i < 6; i++)
covariance[i] = 0.0f;
for (i = 0; i < count; i++)
{
DDSVector3
a,
b;
a.x = points[i].x - centroid.x;
a.y = points[i].y - centroid.y;
a.z = points[i].z - centroid.z;
b.x = points[i].w * a.x;
b.y = points[i].w * a.y;
b.z = points[i].w * a.z;
covariance[0] += a.x*b.x;
covariance[1] += a.x*b.y;
covariance[2] += a.x*b.z;
covariance[3] += a.y*b.y;
covariance[4] += a.y*b.z;
covariance[5] += a.z*b.z;
}
}
/*
  Sort the block's points by their projection onto `axis`, storing the
  permutation in order[16*iteration..].  Returns MagickFalse when the new
  ordering duplicates one from an earlier iteration (so the caller can stop
  iterating).  On success also fills pointsWeights[] with the reordered
  weighted points and *xSumwSum with their running total.
*/
static MagickBooleanType ConstructOrdering(const size_t count,
  const DDSVector4 *points, const DDSVector3 axis, DDSVector4 *pointsWeights,
  DDSVector4 *xSumwSum, unsigned char *order, size_t iteration)
{
  float
    dps[16],
    f;

  register ssize_t
    i;

  size_t
    j;

  unsigned char
    c,
    *o,
    *p;

  o = order + (16*iteration);

  /* Project each point onto the axis; start with the identity order. */
  for (i=0; i < (ssize_t) count; i++)
  {
    dps[i] = Dot(points[i],axis);
    o[i] = (unsigned char)i;
  }

  /* Insertion sort of the projections, permuting o[] in step. */
  for (i=0; i < (ssize_t) count; i++)
  {
    for (j=i; j > 0 && dps[j] < dps[j - 1]; j--)
    {
      f = dps[j];
      dps[j] = dps[j - 1];
      dps[j - 1] = f;

      c = o[j];
      o[j] = o[j - 1];
      o[j - 1] = c;
    }
  }

  /* Reject the ordering if any previous iteration produced the same one. */
  for (i=0; i < (ssize_t) iteration; i++)
  {
    MagickBooleanType
      same;

    p = order + (16*i);
    same = MagickTrue;

    for (j=0; j < count; j++)
    {
      if (o[j] != p[j])
        {
          same = MagickFalse;
          break;
        }
    }

    if (same != MagickFalse)
      return MagickFalse;
  }

  /* Rebuild the weighted points in sorted order and their total. */
  xSumwSum->x = 0;
  xSumwSum->y = 0;
  xSumwSum->z = 0;
  xSumwSum->w = 0;

  for (i=0; i < (ssize_t) count; i++)
  {
    DDSVector4
      v;

    j = (size_t) o[i];

    /* Pre-multiply by the point's weight (carried in w). */
    v.x = points[j].w * points[j].x;
    v.y = points[j].w * points[j].y;
    v.z = points[j].w * points[j].z;
    v.w = points[j].w * 1.0f;

    VectorCopy44(v,&pointsWeights[i]);
    VectorAdd(*xSumwSum,v,xSumwSum);
  }

  return MagickTrue;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s D D S %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsDDS() returns MagickTrue if the image format type, identified by the
% magick string, is DDS.
%
% The format of the IsDDS method is:
%
% MagickBooleanType IsDDS(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsDDS(const unsigned char *magick, const size_t length)
{
  /* A DDS stream begins with the four-byte magic "DDS " (trailing space). */
  if (length >= 4)
    {
      if (LocaleNCompare((char *) magick,"DDS ", 4) == 0)
        return(MagickTrue);
    }
  return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadDDSImage() reads a DirectDraw Surface image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadDDSImage method is:
%
% Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: The image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
  Image
    *image;

  MagickBooleanType
    status,
    cubemap = MagickFalse,
    volume = MagickFalse,
    matte;

  CompressionType
    compression;

  DDSInfo
    dds_info;

  DDSDecoder
    *decoder;

  size_t
    n,
    num_images;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImage(image_info);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Initialize image structure.
  */
  if (ReadDDSInfo(image, &dds_info) != MagickTrue)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");

  if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP)
    cubemap = MagickTrue;

  if (dds_info.ddscaps2 & DDSCAPS2_VOLUME && dds_info.depth > 0)
    volume = MagickTrue;

  /* Position past the 128-byte header (magic + DDS_HEADER). */
  (void) SeekBlob(image, 128, SEEK_SET);

  /*
    Determine pixel format
  */
  if (dds_info.pixelformat.flags & DDPF_RGB)
    {
      compression = NoCompression;
      if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS)
        {
          matte = MagickTrue;
          decoder = ReadUncompressedRGBA;
        }
      else
        {
          /* NOTE(review): matte is MagickTrue here even though
             DDPF_ALPHAPIXELS is absent, while the luminance branch below
             sets MagickFalse for the same decoder -- confirm this
             asymmetry is intentional. */
          matte = MagickTrue;
          decoder = ReadUncompressedRGB;
        }
    }
  else if (dds_info.pixelformat.flags & DDPF_LUMINANCE)
   {
      compression = NoCompression;
      if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS)
        {
          /* Not sure how to handle this */
          ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
        }
      else
        {
          matte = MagickFalse;
          decoder = ReadUncompressedRGB;
        }
    }
  else if (dds_info.pixelformat.flags & DDPF_FOURCC)
    {
      switch (dds_info.pixelformat.fourcc)
      {
        case FOURCC_DXT1:
        {
          matte = MagickFalse;
          compression = DXT1Compression;
          decoder = ReadDXT1;
          break;
        }
        case FOURCC_DXT3:
        {
          matte = MagickTrue;
          compression = DXT3Compression;
          decoder = ReadDXT3;
          break;
        }
        case FOURCC_DXT5:
        {
          matte = MagickTrue;
          compression = DXT5Compression;
          decoder = ReadDXT5;
          break;
        }
        default:
        {
          /* Unknown FOURCC */
          ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
        }
      }
    }
  else
    {
      /* Neither compressed nor uncompressed... thus unsupported */
      ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
    }

  num_images = 1;
  if (cubemap)
    {
      /*
        Determine number of faces defined in the cubemap
      */
      num_images = 0;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEX) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEX) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEY) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEY) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEZ) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEZ) num_images++;
    }

  if (volume)
    num_images = dds_info.depth;

  /* Sanity-check the image count against the file size before looping. */
  if ((num_images == 0) || (num_images > GetBlobSize(image)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");

  if (AcquireMagickResource(ListLengthResource,num_images) == MagickFalse)
    ThrowReaderException(ResourceLimitError,"ListLengthExceedsLimit");

  for (n = 0; n < num_images; n++)
  {
    if (n != 0)
      {
        if (EOFBlob(image) != MagickFalse)
          ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
        /* Start a new image */
        AcquireNextImage(image_info,image);
        if (GetNextImageInList(image) == (Image *) NULL)
          return(DestroyImageList(image));
        image=SyncNextImageInList(image);
      }

    image->matte = matte;
    image->compression = compression;
    image->columns = dds_info.width;
    image->rows = dds_info.height;
    image->storage_class = DirectClass;
    image->endian = LSBEndian;
    image->depth = 8;
    if (image_info->ping != MagickFalse)
      {
        (void) CloseBlob(image);
        return(GetFirstImageInList(image));
      }
    status=SetImageExtent(image,image->columns,image->rows);
    if (status == MagickFalse)
      {
        InheritException(exception,&image->exception);
        return(DestroyImageList(image));
      }
    (void) SetImageBackgroundColor(image);

    /* Decode this face/slice with the format-specific reader. */
    if ((decoder)(image, &dds_info, exception) != MagickTrue)
      {
        (void) CloseBlob(image);
        return(GetFirstImageInList(image));
      }
  }

  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}
/*
  Parse the fixed-layout DDS_HEADER (little-endian) that follows the 4-byte
  magic into *dds_info.  Returns MagickFalse when the structure sizes or
  the required DDSD_* flags do not match.  The read order below mirrors the
  on-disk layout exactly and must not be rearranged.
*/
static MagickBooleanType ReadDDSInfo(Image *image, DDSInfo *dds_info)
{
  size_t
    hdr_size,
    required;

  /* Seek to start of header */
  (void) SeekBlob(image, 4, SEEK_SET);

  /* Check header field */
  hdr_size = ReadBlobLSBLong(image);
  if (hdr_size != 124)
    return MagickFalse;

  /* Fill in DDS info struct */
  dds_info->flags = ReadBlobLSBLong(image);

  /* Check required flags */
  required=(size_t) (DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT);
  if ((dds_info->flags & required) != required)
    return MagickFalse;

  dds_info->height = ReadBlobLSBLong(image);
  dds_info->width = ReadBlobLSBLong(image);
  dds_info->pitchOrLinearSize = ReadBlobLSBLong(image);
  dds_info->depth = ReadBlobLSBLong(image);
  dds_info->mipmapcount = ReadBlobLSBLong(image);

  (void) SeekBlob(image, 44, SEEK_CUR); /* reserved region of 11 DWORDs */

  /* Read pixel format structure */
  hdr_size = ReadBlobLSBLong(image);
  if (hdr_size != 32)
    return MagickFalse;

  dds_info->pixelformat.flags = ReadBlobLSBLong(image);
  dds_info->pixelformat.fourcc = ReadBlobLSBLong(image);
  dds_info->pixelformat.rgb_bitcount = ReadBlobLSBLong(image);
  dds_info->pixelformat.r_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.g_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.b_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.alpha_bitmask = ReadBlobLSBLong(image);

  dds_info->ddscaps1 = ReadBlobLSBLong(image);
  dds_info->ddscaps2 = ReadBlobLSBLong(image);

  (void) SeekBlob(image, 12, SEEK_CUR); /* 3 reserved DWORDs */

  return MagickTrue;
}
/*
  Decode DXT1/BC1 data: each 8-byte block holds two RGB565 endpoints and
  sixteen 2-bit palette indices covering a 4x4 texel tile.  Edge tiles are
  clipped to the image bounds.
*/
static MagickBooleanType ReadDXT1(Image *image,DDSInfo *dds_info,
  ExceptionInfo *exception)
{
  DDSColors
    colors;

  PixelPacket
    *q;

  register ssize_t
    i,
    x;

  size_t
    bits;

  ssize_t
    j,
    y;

  unsigned char
    code;

  unsigned short
    c0,
    c1;

  for (y = 0; y < (ssize_t) image->rows; y += 4)
  {
    for (x = 0; x < (ssize_t) image->columns; x += 4)
    {
      /* Get 4x4 patch of pixels to write on */
      q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x),
        MagickMin(4,image->rows-y),exception);
      if (q == (PixelPacket *) NULL)
        return MagickFalse;

      /* Read 8 bytes of data from the image */
      c0 = ReadBlobLSBShort(image);
      c1 = ReadBlobLSBShort(image);
      bits = ReadBlobLSBLong(image);

      /* ignoreAlpha == MagickFalse: c0 <= c1 selects 3-colour mode with a
         transparent fourth entry. */
      CalculateColors(c0, c1, &colors, MagickFalse);
      if (EOFBlob(image) != MagickFalse)
        break;

      /* Write the pixels */
      for (j = 0; j < 4; j++)
      {
        for (i = 0; i < 4; i++)
        {
          if (((x + i) < (ssize_t) image->columns) &&
              ((y + j) < (ssize_t) image->rows))
            {
              /* Two bits per texel, row-major within the tile. */
              code=(unsigned char) ((bits >> ((j*4+i)*2)) & 0x3);
              SetPixelRed(q,ScaleCharToQuantum(colors.r[code]));
              SetPixelGreen(q,ScaleCharToQuantum(colors.g[code]));
              SetPixelBlue(q,ScaleCharToQuantum(colors.b[code]));
              SetPixelOpacity(q,ScaleCharToQuantum(colors.a[code]));
              if ((colors.a[code] != 0) && (image->matte == MagickFalse))
                image->matte=MagickTrue;  /* Correct matte */
              q++;
            }
        }
      }

      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return MagickFalse;
    }
    if (EOFBlob(image) != MagickFalse)
      break;
  }

  /* DXT1 blocks are 8 bytes when skipping trailing mipmaps. */
  return(SkipDXTMipmaps(image,dds_info,8,exception));
}
/*
  Decode DXT3/BC2 data: each 16-byte block carries 64 bits of explicit
  4-bit alpha followed by a DXT1-style colour block (always decoded in
  4-colour mode).  Edge tiles are clipped to the image bounds.
*/
static MagickBooleanType ReadDXT3(Image *image, DDSInfo *dds_info,
  ExceptionInfo *exception)
{
  DDSColors
    colors;

  ssize_t
    j,
    y;

  PixelPacket
    *q;

  register ssize_t
    i,
    x;

  unsigned char
    alpha;

  size_t
    a0,
    a1,
    bits,
    code;

  unsigned short
    c0,
    c1;

  for (y = 0; y < (ssize_t) dds_info->height; y += 4)
  {
    for (x = 0; x < (ssize_t) dds_info->width; x += 4)
    {
      /* Get 4x4 patch of pixels to write on */
      q = QueueAuthenticPixels(image, x, y, MagickMin(4, dds_info->width - x),
        MagickMin(4, dds_info->height - y),exception);
      if (q == (PixelPacket *) NULL)
        return MagickFalse;

      /* Read alpha values (8 bytes): a0 covers tile rows 0-1, a1 rows
         2-3, four bits per texel. */
      a0 = ReadBlobLSBLong(image);
      a1 = ReadBlobLSBLong(image);

      /* Read 8 bytes of data from the image */
      c0 = ReadBlobLSBShort(image);
      c1 = ReadBlobLSBShort(image);
      bits = ReadBlobLSBLong(image);

      /* ignoreAlpha == MagickTrue: alpha is explicit here, so the colour
         block always uses 4-colour mode. */
      CalculateColors(c0, c1, &colors, MagickTrue);
      if (EOFBlob(image) != MagickFalse)
        break;

      /* Write the pixels */
      for (j = 0; j < 4; j++)
      {
        for (i = 0; i < 4; i++)
        {
          if ((x + i) < (ssize_t) dds_info->width && (y + j) < (ssize_t) dds_info->height)
            {
              code = (bits >> ((4*j+i)*2)) & 0x3;
              SetPixelRed(q,ScaleCharToQuantum(colors.r[code]));
              SetPixelGreen(q,ScaleCharToQuantum(colors.g[code]));
              SetPixelBlue(q,ScaleCharToQuantum(colors.b[code]));
              /*
                Extract alpha value: multiply 0..15 by 17 to get range 0..255
              */
              if (j < 2)
                alpha = 17U * (unsigned char) ((a0 >> (4*(4*j+i))) & 0xf);
              else
                alpha = 17U * (unsigned char) ((a1 >> (4*(4*(j-2)+i))) & 0xf);
              SetPixelAlpha(q,ScaleCharToQuantum((unsigned char)
                alpha));
              q++;
            }
        }
      }

      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return MagickFalse;
    }
    if (EOFBlob(image) != MagickFalse)
      break;
  }

  /* DXT3 blocks are 16 bytes when skipping trailing mipmaps. */
  return(SkipDXTMipmaps(image,dds_info,16,exception));
}
/*
  ReadDXT5() decodes a DXT5 (BC3) compressed DDS surface: each 4x4 texel
  block is 16 bytes — two reference alpha bytes (a0, a1), 48 bits of 3-bit
  alpha codes, then an 8-byte BC1-style color block.  Trailing mipmap levels
  are skipped afterwards (16 bytes per block).

  Returns MagickFalse if the pixel cache cannot be queued or synced.
*/
static MagickBooleanType ReadDXT5(Image *image, DDSInfo *dds_info,
  ExceptionInfo *exception)
{
  DDSColors
    colors;

  ssize_t
    j,
    y;

  MagickSizeType
    alpha_bits;

  PixelPacket
    *q;

  register ssize_t
    i,
    x;

  unsigned char
    a0,
    a1;

  size_t
    alpha,
    bits,
    code,
    alpha_code;

  unsigned short
    c0,
    c1;

  for (y = 0; y < (ssize_t) dds_info->height; y += 4)
  {
    for (x = 0; x < (ssize_t) dds_info->width; x += 4)
    {
      /* Get 4x4 patch of pixels to write on */
      q = QueueAuthenticPixels(image, x, y, MagickMin(4, dds_info->width - x),
        MagickMin(4, dds_info->height - y),exception);
      if (q == (PixelPacket *) NULL)
        return MagickFalse;
      /* Read alpha values (8 bytes): a0/a1 endpoints + 48 bits of codes */
      a0 = (unsigned char) ReadBlobByte(image);
      a1 = (unsigned char) ReadBlobByte(image);
      /* Assemble the 48 code bits from a 32-bit and a 16-bit read */
      alpha_bits = (MagickSizeType)ReadBlobLSBLong(image);
      alpha_bits = alpha_bits | ((MagickSizeType)ReadBlobLSBShort(image) << 32);
      /* Read 8 bytes of data from the image */
      c0 = ReadBlobLSBShort(image);
      c1 = ReadBlobLSBShort(image);
      bits = ReadBlobLSBLong(image);
      /* NOTE(review): MagickTrue presumably selects the opaque (4-color)
         decode path in CalculateColors — confirm against its definition */
      CalculateColors(c0, c1, &colors, MagickTrue);
      if (EOFBlob(image) != MagickFalse)
        break;
      /* Write the pixels */
      for (j = 0; j < 4; j++)
      {
        for (i = 0; i < 4; i++)
        {
          /* Clip the 4x4 block against the surface edge */
          if ((x + i) < (ssize_t) dds_info->width && (y + j) < (ssize_t) dds_info->height)
          {
            /* 2-bit color index for texel (i,j) */
            code = (bits >> ((4*j+i)*2)) & 0x3;
            SetPixelRed(q,ScaleCharToQuantum(colors.r[code]));
            SetPixelGreen(q,ScaleCharToQuantum(colors.g[code]));
            SetPixelBlue(q,ScaleCharToQuantum(colors.b[code]));
            /* Extract alpha value: codes 0/1 are the endpoints; when
               a0 > a1 the remaining 6 codes interpolate an 8-step ramp,
               otherwise a 6-step ramp with codes 6/7 pinned to 0 and 255 */
            alpha_code = (size_t) (alpha_bits >> (3*(4*j+i))) & 0x7;
            if (alpha_code == 0)
              alpha = a0;
            else if (alpha_code == 1)
              alpha = a1;
            else if (a0 > a1)
              alpha = ((8-alpha_code) * a0 + (alpha_code-1) * a1) / 7;
            else if (alpha_code == 6)
              alpha = 0;
            else if (alpha_code == 7)
              alpha = 255;
            else
              alpha = (((6-alpha_code) * a0 + (alpha_code-1) * a1) / 5);
            SetPixelAlpha(q,ScaleCharToQuantum((unsigned char)
              alpha));
            q++;
          }
        }
      }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return MagickFalse;
    }
    if (EOFBlob(image) != MagickFalse)
      break;
  }
  /* 16 bytes per 4x4 block for DXT5 mipmap levels */
  return(SkipDXTMipmaps(image,dds_info,16,exception));
}
/*
  ReadUncompressedRGB() reads an uncompressed DDS surface with no alpha:
  8-bit grayscale, 16-bit RGB565, 24-bit BGR, or 32-bit BGRX (the pad byte
  is discarded).  Alpha is forced fully opaque.  Trailing RGB mipmap levels
  are skipped afterwards.

  Returns MagickFalse if the pixel cache cannot be queued or synced; throws
  for an unsupported 16-bit mask layout.
*/
static MagickBooleanType ReadUncompressedRGB(Image *image, DDSInfo *dds_info,
  ExceptionInfo *exception)
{
  PixelPacket
    *q;

  ssize_t
    x, y;

  unsigned short
    color;

  if (dds_info->pixelformat.rgb_bitcount == 8)
    (void) SetImageType(image,GrayscaleType);
  else if (dds_info->pixelformat.rgb_bitcount == 16 && !IsBitMask(
    dds_info->pixelformat,0xf800,0x07e0,0x001f,0x0000))
    /* Only the RGB565 mask layout is supported at 16 bits */
    ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported",
      image->filename);
  for (y = 0; y < (ssize_t) dds_info->height; y++)
  {
    q = QueueAuthenticPixels(image, 0, y, dds_info->width, 1,exception);
    if (q == (PixelPacket *) NULL)
      return MagickFalse;
    for (x = 0; x < (ssize_t) dds_info->width; x++)
    {
      if (dds_info->pixelformat.rgb_bitcount == 8)
        SetPixelGray(q,ScaleCharToQuantum(ReadBlobByte(image)));
      else if (dds_info->pixelformat.rgb_bitcount == 16)
        {
          /* RGB565: expand each field to 0..255 by scaling */
          color=ReadBlobShort(image);
          SetPixelRed(q,ScaleCharToQuantum((unsigned char)
            (((color >> 11)/31.0)*255)));
          SetPixelGreen(q,ScaleCharToQuantum((unsigned char)
            ((((unsigned short)(color << 5) >> 10)/63.0)*255)));
          SetPixelBlue(q,ScaleCharToQuantum((unsigned char)
            ((((unsigned short)(color << 11) >> 11)/31.0)*255)));
        }
      else
        {
          /* 24/32-bit data is stored B, G, R (little-endian BGR order) */
          SetPixelBlue(q,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)));
          SetPixelGreen(q,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)));
          SetPixelRed(q,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)));
          if (dds_info->pixelformat.rgb_bitcount == 32)
            (void) ReadBlobByte(image);  /* discard the unused X byte */
        }
      SetPixelAlpha(q,QuantumRange);
      q++;
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      return MagickFalse;
  }
  /* NOTE(review): pixel_size of 3 is passed even for 32-bit surfaces —
     mipmap skipping may under-seek there; confirm against SkipRGBMipmaps */
  return(SkipRGBMipmaps(image,dds_info,3,exception));
}
/*
  ReadUncompressedRGBA() reads an uncompressed DDS surface with alpha.
  16-bit surfaces are classified by their channel masks into ARGB1555
  (alphaBits=1), luminance+alpha (alphaBits=2), or ARGB4444 (alphaBits=4);
  anything else at 16 bits is rejected.  Other bit counts are read as
  byte-ordered BGRA.  Trailing RGBA mipmap levels are skipped afterwards.

  Returns MagickFalse if the pixel cache cannot be queued or synced.
*/
static MagickBooleanType ReadUncompressedRGBA(Image *image, DDSInfo *dds_info,
  ExceptionInfo *exception)
{
  PixelPacket
    *q;

  ssize_t
    alphaBits,
    x,
    y;

  unsigned short
    color;

  alphaBits=0;
  if (dds_info->pixelformat.rgb_bitcount == 16)
    {
      if (IsBitMask(dds_info->pixelformat,0x7c00,0x03e0,0x001f,0x8000))
        alphaBits=1;  /* ARGB1555 */
      else if (IsBitMask(dds_info->pixelformat,0x00ff,0x00ff,0x00ff,0xff00))
        {
          alphaBits=2;  /* 8-bit luminance + 8-bit alpha */
          (void) SetImageType(image,GrayscaleMatteType);
        }
      else if (IsBitMask(dds_info->pixelformat,0x0f00,0x00f0,0x000f,0xf000))
        alphaBits=4;  /* ARGB4444 */
      else
        ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported",
          image->filename);
    }
  for (y = 0; y < (ssize_t) dds_info->height; y++)
  {
    q = QueueAuthenticPixels(image, 0, y, dds_info->width, 1,exception);
    if (q == (PixelPacket *) NULL)
      return MagickFalse;
    for (x = 0; x < (ssize_t) dds_info->width; x++)
    {
      if (dds_info->pixelformat.rgb_bitcount == 16)
        {
          color=ReadBlobShort(image);
          if (alphaBits == 1)
            {
              /* ARGB1555: 1-bit alpha, 5 bits per color channel */
              SetPixelAlpha(q,(color & (1 << 15)) ? QuantumRange : 0);
              SetPixelRed(q,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 1) >> 11)/31.0)*255)));
              SetPixelGreen(q,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 6) >> 11)/31.0)*255)));
              SetPixelBlue(q,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 11) >> 11)/31.0)*255)));
            }
          else if (alphaBits == 2)
            {
              /* High byte alpha, low byte luminance */
              SetPixelAlpha(q,ScaleCharToQuantum((unsigned char)
                (color >> 8)));
              SetPixelGray(q,ScaleCharToQuantum((unsigned char)color));
            }
          else
            {
              /* ARGB4444: 4 bits per channel, scaled to 0..255 */
              SetPixelAlpha(q,ScaleCharToQuantum((unsigned char)
                (((color >> 12)/15.0)*255)));
              SetPixelRed(q,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 4) >> 12)/15.0)*255)));
              SetPixelGreen(q,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 8) >> 12)/15.0)*255)));
              SetPixelBlue(q,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 12) >> 12)/15.0)*255)));
            }
        }
      else
        {
          /* Byte-ordered BGRA */
          SetPixelBlue(q,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)));
          SetPixelGreen(q,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)));
          SetPixelRed(q,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)));
          SetPixelAlpha(q,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)));
        }
      q++;
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      return MagickFalse;
  }
  return(SkipRGBMipmaps(image,dds_info,4,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterDDSImage() adds attributes for the DDS image format to
% the list of supported formats. The attributes include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterDDSImage method is:
%
% RegisterDDSImage(void)
%
*/
ModuleExport size_t RegisterDDSImage(void)
{
  /*
    Register the DDS coder under each of its format aliases; every alias
    shares the same read/write/identify handlers and description.
  */
  static const char
    *aliases[] = { "DDS", "DXT1", "DXT5" };

  MagickInfo
    *entry;

  size_t
    i;

  for (i=0; i < sizeof(aliases)/sizeof(aliases[0]); i++)
  {
    entry = SetMagickInfo(aliases[i]);
    entry->decoder = (DecodeImageHandler *) ReadDDSImage;
    entry->encoder = (EncodeImageHandler *) WriteDDSImage;
    entry->magick = (IsImageFormatHandler *) IsDDS;
    entry->seekable_stream=MagickTrue;
    entry->description = ConstantString("Microsoft DirectDraw Surface");
    entry->magick_module = ConstantString("DDS");
    (void) RegisterMagickInfo(entry);
  }
  return(MagickImageCoderSignature);
}
/*
  Translate the 16 per-texel color indices through the block's pixel map:
  unmapped slots (map[i] == -1) become the transparent-black index 3,
  every other slot copies the index recorded for its mapped source pixel.
*/
static void RemapIndices(const ssize_t *map, const unsigned char *source,
  unsigned char *target)
{
  ssize_t
    i;

  for (i=0; i < 16; i++)
    target[i] = (map[i] == -1) ? (unsigned char) 3 : source[map[i]];
}
/*
Skip the mipmap images for compressed (DXTn) dds files
*/
/*
  Skip the mipmap images for compressed (DXTn) dds files.

  texel_size is the byte size of one compressed 4x4 block (8 for DXT1,
  16 for DXT3/DXT5).  Returns MagickFalse (with a warning) if the blob is
  already at EOF, MagickTrue otherwise.
*/
static MagickBooleanType SkipDXTMipmaps(Image *image,DDSInfo *dds_info,
  int texel_size,ExceptionInfo *exception)
{
  register ssize_t
    i;

  MagickOffsetType
    offset;

  size_t
    h,
    w;

  /*
    Only skip mipmaps for textures and cube maps
  */
  if (EOFBlob(image) != MagickFalse)
    {
      ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile",
        image->filename);
      return(MagickFalse);
    }
  if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
      && (dds_info->ddscaps1 & DDSCAPS_TEXTURE
          || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
    {
      w = DIV2(dds_info->width);
      h = DIV2(dds_info->height);
      /*
        Mipmapcount includes the main image, so start from one
      */
      for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
      {
        /* Each level occupies ceil(w/4)*ceil(h/4) blocks of texel_size bytes */
        offset = (MagickOffsetType) ((w + 3) / 4) * ((h + 3) / 4) * texel_size;
        if (SeekBlob(image,offset,SEEK_CUR) < 0)
          break;
        if ((w == 1) && (h == 1))
          break;
        w = DIV2(w);
        h = DIV2(h);
      }
    }
  return(MagickTrue);
}
/*
Skip the mipmap images for uncompressed (RGB or RGBA) dds files
*/
/*
  Skip the mipmap images for uncompressed (RGB or RGBA) dds files.

  pixel_size is the byte size of one pixel.  Returns MagickFalse (with an
  error) if the blob is already at EOF, MagickTrue otherwise.
*/
static MagickBooleanType SkipRGBMipmaps(Image *image,DDSInfo *dds_info,
  int pixel_size,ExceptionInfo *exception)
{
  MagickOffsetType
    offset;

  register ssize_t
    i;

  size_t
    h,
    w;

  /*
    Only skip mipmaps for textures and cube maps
  */
  if (EOFBlob(image) != MagickFalse)
    {
      /* NOTE(review): this path throws CorruptImageError while the DXT
         variant throws CorruptImageWarning — confirm the asymmetry is
         intentional */
      ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
        image->filename);
      return(MagickFalse);
    }
  if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
      && (dds_info->ddscaps1 & DDSCAPS_TEXTURE
          || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
    {
      w = DIV2(dds_info->width);
      h = DIV2(dds_info->height);
      /*
        Mipmapcount includes the main image, so start from one
      */
      for (i=1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
      {
        /* Each level is w*h pixels of pixel_size bytes */
        offset = (MagickOffsetType) w * h * pixel_size;
        if (SeekBlob(image,offset,SEEK_CUR) < 0)
          break;
        w = DIV2(w);
        h = DIV2(h);
        if ((w == 1) && (h == 1))
          break;
      }
    }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterDDSImage() removes format registrations made by the
% DDS module from the list of supported formats.
%
% The format of the UnregisterDDSImage method is:
%
% UnregisterDDSImage(void)
%
*/
ModuleExport void UnregisterDDSImage(void)
{
(void) UnregisterMagickInfo("DDS");
(void) UnregisterMagickInfo("DXT1");
(void) UnregisterMagickInfo("DXT5");
}
/*
  WriteAlphas() emits the 8-byte DXT5 alpha block for one 4x4 tile.  Both
  candidate encodings are tried — the 5-interpolant ramp (with implicit
  0/255 codes) and the 7-interpolant ramp — and whichever has the smaller
  compression error is written.  Output is min/max endpoint bytes followed
  by 16 packed 3-bit indices (two 24-bit groups of eight).
*/
static void WriteAlphas(Image *image, const ssize_t* alphas, size_t min5,
  size_t max5, size_t min7, size_t max7)
{
  register ssize_t
    i;

  size_t
    err5,
    err7,
    j;

  unsigned char
    indices5[16],
    indices7[16];

  /* Quantize both candidate ranges and measure their error */
  FixRange(min5,max5,5);
  err5 = CompressAlpha(min5,max5,5,alphas,indices5);
  FixRange(min7,max7,7);
  err7 = CompressAlpha(min7,max7,7,alphas,indices7);
  if (err7 < err5)
  {
    /*
      The 7-ramp won: translate its indices into the index order used when
      the endpoints are emitted swapped (a0 <= a1 selects the 5-ramp
      decode), so the decoder reproduces the 7-ramp values.
    */
    for (i=0; i < 16; i++)
    {
      unsigned char
        index;
      index = indices7[i];
      if( index == 0 )
        indices5[i] = 1;
      else if (index == 1)
        indices5[i] = 0;
      else
        indices5[i] = 9 - index;
    }
    min5 = max7;
    max5 = min7;
  }
  /* Endpoint bytes, then 48 bits of 3-bit indices in two 24-bit halves */
  (void) WriteBlobByte(image,(unsigned char) min5);
  (void) WriteBlobByte(image,(unsigned char) max5);
  for(i=0; i < 2; i++)
  {
    size_t
      value = 0;
    for (j=0; j < 8; j++)
    {
      size_t index = (size_t) indices5[j + i*8];
      value |= ( index << 3*j );
    }
    for (j=0; j < 3; j++)
    {
      size_t byte = (value >> 8*j) & 0xff;
      (void) WriteBlobByte(image,(unsigned char) byte);
    }
  }
}
/*
  WriteCompressed() fits a pair of color endpoints to one 4x4 tile's
  distinct colors and writes the resulting 8-byte BC1 color block.  The
  cheaper range fit is used when cluster fitting is disabled or the tile
  is empty; otherwise the higher-quality cluster fit is used.
*/
static void WriteCompressed(Image *image, const size_t count,
  DDSVector4* points, const ssize_t* map, const MagickBooleanType clusterFit)
{
  float
    covariance[16];

  DDSVector3
    end,
    principle,
    start;

  DDSVector4
    metric;

  unsigned char
    indices[16];

  VectorInit(metric,1.0f);
  VectorInit3(start,0.0f);
  VectorInit3(end,0.0f);

  /* Principal axis of the color cloud guides the endpoint search */
  ComputeWeightedCovariance(count,points,covariance);
  ComputePrincipleComponent(covariance,&principle);

  if (clusterFit == MagickFalse || count == 0)
    CompressRangeFit(count,points,map,principle,metric,&start,&end,indices);
  else
    CompressClusterFit(count,points,map,principle,metric,&start,&end,indices);

  WriteIndices(image,start,end,indices);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WriteDDSImage() writes a DirectDraw Surface image file in the DXT5 format.
%
% The format of the WriteDDSImage method is:
%
% MagickBooleanType WriteDDSImage(const ImageInfo *image_info,Image *image)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
*/
/*
  WriteDDSImage() writes the image as a DDS file.  The output is DXT5 by
  default, DXT1 when the image has no alpha or when requested via the
  magick name, compression setting, or "dds:compression" option, and
  uncompressed RGB(A) when compression is disabled.  A mipmap chain is
  emitted only when both dimensions are powers of two (or "dds:mipmaps"
  caps the count).

  Returns MagickTrue on success; MagickFalse if the blob cannot be opened
  or a mipmap fails to write.
*/
static MagickBooleanType WriteDDSImage(const ImageInfo *image_info,
  Image *image)
{
  const char
    *option;

  size_t
    compression,
    columns,
    maxMipmaps,
    mipmaps,
    pixelFormat,
    rows;

  MagickBooleanType
    clusterFit,
    status,
    weightByAlpha;

  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception);
  if (status == MagickFalse)
    return(status);
  (void) TransformImageColorspace(image,sRGBColorspace);
  /* Pick the pixel format: DXT5 with alpha, DXT1 without; later checks
     may override via magick name, compression, or coder options */
  pixelFormat=DDPF_FOURCC;
  compression=FOURCC_DXT5;
  if (!image->matte)
    compression=FOURCC_DXT1;
  if (LocaleCompare(image_info->magick,"dxt1") == 0)
    compression=FOURCC_DXT1;
  if (image_info->compression == DXT1Compression)
    compression=FOURCC_DXT1;
  else if (image_info->compression == NoCompression)
    pixelFormat=DDPF_RGB;
  option=GetImageOption(image_info,"dds:compression");
  if (option != (char *) NULL)
    {
      if (LocaleCompare(option,"dxt1") == 0)
        compression=FOURCC_DXT1;
      if (LocaleCompare(option,"none") == 0)
        pixelFormat=DDPF_RGB;
    }
  /* Cluster fitting (and alpha weighting, DXT5 only) are opt-in because
     they are slower than the default range fit */
  clusterFit=MagickFalse;
  weightByAlpha=MagickFalse;
  if (pixelFormat == DDPF_FOURCC)
    {
      option=GetImageOption(image_info,"dds:cluster-fit");
      if (IsStringTrue(option) != MagickFalse)
        {
          clusterFit=MagickTrue;
          if (compression != FOURCC_DXT1)
            {
              option=GetImageOption(image_info,"dds:weight-by-alpha");
              if (IsStringTrue(option) != MagickFalse)
                weightByAlpha=MagickTrue;
            }
        }
    }
  maxMipmaps=SIZE_MAX;
  mipmaps=0;
  /* Mipmaps are only generated for power-of-two dimensions */
  if ((image->columns & (image->columns - 1)) == 0 &&
      (image->rows & (image->rows - 1)) == 0)
    {
      option=GetImageOption(image_info,"dds:mipmaps");
      if (option != (char *) NULL)
        maxMipmaps=StringToUnsignedLong(option);
      if (maxMipmaps != 0)
        {
          /* Count the levels needed to shrink down to 1x1 */
          columns=image->columns;
          rows=image->rows;
          while ((columns != 1 || rows != 1) && mipmaps != maxMipmaps)
          {
            columns=DIV2(columns);
            rows=DIV2(rows);
            mipmaps++;
          }
        }
    }
  WriteDDSInfo(image,pixelFormat,compression,mipmaps);
  WriteImageData(image,pixelFormat,compression,clusterFit,weightByAlpha,
    &image->exception);
  if (mipmaps > 0 && WriteMipmaps(image,pixelFormat,compression,mipmaps,
      clusterFit,weightByAlpha,&image->exception) == MagickFalse)
    return(MagickFalse);
  (void) CloseBlob(image);
  return(MagickTrue);
}
/*
  WriteDDSInfo() emits the 128-byte DDS file header: the "DDS " magic,
  the 124-byte DDS_HEADER (flags, dimensions, pitch or linear size, mipmap
  count, reserved software tag) and the embedded 32-byte DDS_PIXELFORMAT
  (FourCC for compressed output, channel masks for uncompressed), followed
  by the caps words.
*/
static void WriteDDSInfo(Image *image, const size_t pixelFormat,
  const size_t compression, const size_t mipmaps)
{
  char
    software[MaxTextExtent];

  register ssize_t
    i;

  unsigned int
    format,
    caps,
    flags;

  flags=(unsigned int) (DDSD_CAPS | DDSD_WIDTH | DDSD_HEIGHT |
    DDSD_PIXELFORMAT);
  caps=(unsigned int) DDSCAPS_TEXTURE;
  format=(unsigned int) pixelFormat;

  /* Compressed surfaces report a linear size, uncompressed ones a pitch */
  if (format == DDPF_FOURCC)
    flags=flags | DDSD_LINEARSIZE;
  else
    flags=flags | DDSD_PITCH;

  if (mipmaps > 0)
    {
      flags=flags | (unsigned int) DDSD_MIPMAPCOUNT;
      caps=caps | (unsigned int) (DDSCAPS_MIPMAP | DDSCAPS_COMPLEX);
    }

  if (format != DDPF_FOURCC && image->matte)
    format=format | DDPF_ALPHAPIXELS;

  (void) WriteBlob(image,4,(unsigned char *) "DDS ");
  (void) WriteBlobLSBLong(image,124);  /* DDS_HEADER size */
  (void) WriteBlobLSBLong(image,flags);
  (void) WriteBlobLSBLong(image,(unsigned int) image->rows);    /* height */
  (void) WriteBlobLSBLong(image,(unsigned int) image->columns); /* width */

  if (pixelFormat == DDPF_FOURCC)
    {
      /* Compressed DDS requires linear compressed size of first image */
      if (compression == FOURCC_DXT1)
        (void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1,
          (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*8));
      else /* DXT5 */
        (void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1,
          (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*16));
    }
  else
    {
      /* Uncompressed DDS requires byte pitch of first image */
      if (image->matte != MagickFalse)
        (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 4));
      else
        (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 3));
    }

  (void) WriteBlobLSBLong(image,0x00);  /* depth (unused) */
  (void) WriteBlobLSBLong(image,(unsigned int) mipmaps+1);

  /* 44-byte reserved block carries the producer tag */
  (void) memset(software,0,sizeof(software));
  (void) CopyMagickString(software,"IMAGEMAGICK",MaxTextExtent);
  (void) WriteBlob(image,44,(unsigned char *) software);

  (void) WriteBlobLSBLong(image,32);  /* DDS_PIXELFORMAT size */
  (void) WriteBlobLSBLong(image,format);

  if (pixelFormat == DDPF_FOURCC)
    {
      (void) WriteBlobLSBLong(image,(unsigned int) compression);
      for(i=0;i < 5;i++) /* bitcount / masks */
        (void) WriteBlobLSBLong(image,0x00);
    }
  else
    {
      (void) WriteBlobLSBLong(image,0x00);  /* no FourCC */
      if (image->matte != MagickFalse)
        {
          /* 32-bit A8R8G8B8 masks */
          (void) WriteBlobLSBLong(image,32);
          (void) WriteBlobLSBLong(image,0xff0000);
          (void) WriteBlobLSBLong(image,0xff00);
          (void) WriteBlobLSBLong(image,0xff);
          (void) WriteBlobLSBLong(image,0xff000000);
        }
      else
        {
          /* 24-bit R8G8B8 masks */
          (void) WriteBlobLSBLong(image,24);
          (void) WriteBlobLSBLong(image,0xff0000);
          (void) WriteBlobLSBLong(image,0xff00);
          (void) WriteBlobLSBLong(image,0xff);
          (void) WriteBlobLSBLong(image,0x00);
        }
    }

  (void) WriteBlobLSBLong(image,caps);
  for(i=0;i < 4;i++) /* ddscaps2 + reserved region */
    (void) WriteBlobLSBLong(image,0x00);
}
/*
  WriteFourCC() compresses the whole image as DXT1 or DXT5 blocks.  For
  each 4x4 tile it collects the distinct colors (weighted by occurrence,
  optionally by alpha), tracks the alpha range for the DXT5 alpha block,
  then writes the alpha block (DXT5 only) followed by the color block —
  a single-color fast path when the tile has one distinct color.
*/
static void WriteFourCC(Image *image, const size_t compression,
  const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha,
  ExceptionInfo *exception)
{
  register const PixelPacket
    *p;

  register ssize_t
    x;

  ssize_t
    i,
    y,
    bx,
    by;

  for (y=0; y < (ssize_t) image->rows; y+=4)
  {
    for (x=0; x < (ssize_t) image->columns; x+=4)
    {
      MagickBooleanType
        match;

      DDSVector4
        point,
        points[16];

      size_t
        count = 0,
        max5 = 0,
        max7 = 0,
        min5 = 255,
        min7 = 255,
        columns = 4,
        rows = 4;

      ssize_t
        alphas[16],
        map[16];

      unsigned char
        alpha;

      /* Clip the tile against the image edges */
      if (x + columns >= image->columns)
        columns = image->columns - x;
      if (y + rows >= image->rows)
        rows = image->rows - y;

      p=GetVirtualPixels(image,x,y,columns,rows,exception);
      if (p == (const PixelPacket *) NULL)
        break;

      for (i=0; i<16; i++)
      {
        map[i] = -1;
        alphas[i] = -1;
      }

      for (by=0; by < (ssize_t) rows; by++)
      {
        for (bx=0; bx < (ssize_t) columns; bx++)
        {
          if (compression == FOURCC_DXT5)
            alpha = ScaleQuantumToChar(GetPixelAlpha(p));
          else
            alpha = 255;

          if (compression == FOURCC_DXT5)
            {
              /* min7/max7 track the full range; min5/max5 exclude the
                 extremes handled by the 5-ramp's implicit 0/255 codes */
              if (alpha < min7)
                min7 = alpha;
              if (alpha > max7)
                max7 = alpha;
              if (alpha != 0 && alpha < min5)
                min5 = alpha;
              if (alpha != 255 && alpha > max5)
                max5 = alpha;
            }

          alphas[4*by + bx] = (size_t)alpha;

          /* Normalize color (and optional alpha weight) to 0..1 */
          point.x = (float)ScaleQuantumToChar(GetPixelRed(p)) / 255.0f;
          point.y = (float)ScaleQuantumToChar(GetPixelGreen(p)) / 255.0f;
          point.z = (float)ScaleQuantumToChar(GetPixelBlue(p)) / 255.0f;
          point.w = weightByAlpha ? (float)(alpha + 1) / 256.0f : 1.0f;
          p++;

          /* Merge duplicate colors, accumulating their weight */
          match = MagickFalse;
          for (i=0; i < (ssize_t) count; i++)
          {
            if ((points[i].x == point.x) &&
                (points[i].y == point.y) &&
                (points[i].z == point.z) &&
                (alpha >= 128 || compression == FOURCC_DXT5))
              {
                points[i].w += point.w;
                map[4*by + bx] = i;
                match = MagickTrue;
                break;
              }
          }

          if (match != MagickFalse)
            continue;

          /* New distinct color for this tile */
          points[count].x = point.x;
          points[count].y = point.y;
          points[count].z = point.z;
          points[count].w = point.w;
          map[4*by + bx] = count;
          count++;
        }
      }

      /* Square-root the accumulated weights before fitting */
      for (i=0; i < (ssize_t) count; i++)
        points[i].w = sqrt(points[i].w);

      if (compression == FOURCC_DXT5)
        WriteAlphas(image,alphas,min5,max5,min7,max7);

      if (count == 1)
        WriteSingleColorFit(image,points,map);
      else
        WriteCompressed(image,count,points,map,clusterFit);
    }
  }
}
/*
  Dispatch one surface's pixel data to the compressed (FourCC) or
  uncompressed writer, based on the selected pixel format.
*/
static void WriteImageData(Image *image, const size_t pixelFormat,
  const size_t compression, const MagickBooleanType clusterFit,
  const MagickBooleanType weightByAlpha, ExceptionInfo *exception)
{
  if (pixelFormat != DDPF_FOURCC)
    {
      WriteUncompressed(image,exception);
      return;
    }
  WriteFourCC(image,compression,clusterFit,weightByAlpha,exception);
}
/*
  ClampToLimit() rounds value to the nearest integer and clamps the result
  to the range [0, limit].

  Bug fix: the previous version stored the rounded value straight into a
  size_t, so "result < 0.0f" could never be true — a negative input wrapped
  to a huge unsigned value and the function returned limit instead of 0.
  Round in a signed int first, then clamp.
*/
static inline size_t ClampToLimit(const float value,
  const size_t limit)
{
  int
    rounded = (int) (value + 0.5f);

  if (rounded < 0)
    return(0);
  if ((size_t) rounded > limit)
    return(limit);
  return((size_t) rounded);
}
/*
  Pack a normalized RGB endpoint into a 16-bit RGB565 value:
  5 bits of red, 6 bits of green, 5 bits of blue.
*/
static inline size_t ColorTo565(const DDSVector3 point)
{
  const size_t
    red = ClampToLimit(31.0f*point.x,31),
    green = ClampToLimit(63.0f*point.y,63),
    blue = ClampToLimit(31.0f*point.z,31);

  return((red << 11) | (green << 5) | blue);
}
/*
  WriteIndices() writes one 8-byte BC1 color block: the two RGB565
  endpoints (larger value first, which selects 4-color decode mode)
  followed by 16 packed 2-bit indices.  When the endpoints must be
  swapped to satisfy that ordering, the indices are re-mapped to match;
  identical endpoints force all indices to 0.
*/
static void WriteIndices(Image *image, const DDSVector3 start,
  const DDSVector3 end, unsigned char* indices)
{
  register ssize_t
    i;

  size_t
    a,
    b;

  unsigned char
    remapped[16];

  const unsigned char
    *ind;

  a = ColorTo565(start);
  b = ColorTo565(end);

  for (i=0; i<16; i++)
  {
    if( a < b )
      /* Endpoints will be swapped below: flip endpoint indices 0<->1 */
      remapped[i] = (indices[i] ^ 0x1) & 0x3;
    else if( a == b )
      remapped[i] = 0;
    else
      remapped[i] = indices[i];
  }

  if( a < b )
    Swap(a,b);

  /* Endpoints little-endian, then 4 bytes of 2-bit indices (row per byte) */
  (void) WriteBlobByte(image,(unsigned char) (a & 0xff));
  (void) WriteBlobByte(image,(unsigned char) (a >> 8));
  (void) WriteBlobByte(image,(unsigned char) (b & 0xff));
  (void) WriteBlobByte(image,(unsigned char) (b >> 8));

  for (i=0; i<4; i++)
  {
     ind = remapped + 4*i;
     (void) WriteBlobByte(image,ind[0] | (ind[1] << 2) | (ind[2] << 4) |
       (ind[3] << 6));
  }
}
/*
  WriteMipmaps() appends the requested number of mipmap levels, each a
  half-resolution resize of the previous dimensions, sharing the main
  image's open blob.  Returns MagickFalse if a resize fails.

  Bug fix: the WriteImageData() call passed weightByAlpha and clusterFit
  in swapped order.  Both are MagickBooleanType, so the transposition
  compiled silently but applied each option in place of the other for
  every mipmap level.  Arguments now match the declared parameter order
  (..., clusterFit, weightByAlpha, ...).
*/
static MagickBooleanType WriteMipmaps(Image *image, const size_t pixelFormat,
  const size_t compression, const size_t mipmaps,
  const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha,
  ExceptionInfo *exception)
{
  Image*
    resize_image;

  register ssize_t
    i;

  size_t
    columns,
    rows;

  columns = image->columns;
  rows = image->rows;

  for (i=0; i< (ssize_t) mipmaps; i++)
  {
    resize_image = ResizeImage(image,DIV2(columns),DIV2(rows),TriangleFilter,1.0,
      exception);

    if (resize_image == (Image *) NULL)
      return(MagickFalse);

    /* Redirect the resized level's output into the parent image's blob */
    DestroyBlob(resize_image);
    resize_image->blob=ReferenceBlob(image->blob);

    WriteImageData(resize_image,pixelFormat,compression,clusterFit,
      weightByAlpha,exception);

    resize_image=DestroyImage(resize_image);

    columns = DIV2(columns);
    rows = DIV2(rows);
  }

  return(MagickTrue);
}
/*
  WriteSingleColorFit() handles the fast path for a tile with exactly one
  distinct color: look up the precomputed optimal endpoint pair and index
  for that 8-bit RGB color, fill all 16 indices with it, remap through the
  tile's pixel map, and write the color block.
*/
static void WriteSingleColorFit(Image *image, const DDSVector4* points,
  const ssize_t* map)
{
  DDSVector3
    start,
    end;

  register ssize_t
    i;

  unsigned char
    color[3],
    index,
    indexes[16],
    indices[16];

  /* Convert the single normalized color back to 8-bit components */
  color[0] = (unsigned char) ClampToLimit(255.0f*points->x,255);
  color[1] = (unsigned char) ClampToLimit(255.0f*points->y,255);
  color[2] = (unsigned char) ClampToLimit(255.0f*points->z,255);

  index=0;
  ComputeEndPoints(DDS_LOOKUP,color,&start,&end,&index);

  for (i=0; i< 16; i++)
    indexes[i]=index;
  RemapIndices(map,indexes,indices);
  WriteIndices(image,start,end,indices);
}
/*
  WriteUncompressed() streams the image to the blob one scanline at a
  time as packed BGR bytes, appending an alpha byte per pixel when the
  image has a matte channel.
*/
static void WriteUncompressed(Image *image, ExceptionInfo *exception)
{
  register const PixelPacket
    *pixel;

  register ssize_t
    column;

  ssize_t
    row;

  for (row=0; row < (ssize_t) image->rows; row++)
  {
    pixel=GetVirtualPixels(image,0,row,image->columns,1,exception);
    if (pixel == (const PixelPacket *) NULL)
      break;

    for (column=0; column < (ssize_t) image->columns; column++,pixel++)
    {
      (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelBlue(pixel)));
      (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelGreen(pixel)));
      (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelRed(pixel)));
      if (image->matte)
        (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelAlpha(pixel)));
    }
  }
}
|
GB_binop__bclr_int16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bclr_int16)
// A.*B function (eWiseMult): GB (_AemultB_08__bclr_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__bclr_int16)
// A.*B function (eWiseMult): GB (_AemultB_04__bclr_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bclr_int16)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bclr_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__bclr_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bclr_int16)
// C=scalar+B GB (_bind1st__bclr_int16)
// C=scalar+B' GB (_bind1st_tran__bclr_int16)
// C=A+scalar GB (_bind2nd__bclr_int16)
// C=A'+scalar GB (_bind2nd_tran__bclr_int16)
// C type: int16_t
// A type: int16_t
// A pattern? 0
// B type: int16_t
// B pattern? 0
// BinaryOp: cij = GB_BITCLR (aij, bij, int16_t, 16)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_BITCLR (x, y, int16_t, 16) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BCLR || GxB_NO_INT16 || GxB_NO_BCLR_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all 3 matrices are dense; the loop body lives in the
// included template, specialized by the GB_* macros defined above
// (cij = GB_BITCLR (aij, bij, int16_t, 16)).
void GB (_Cdense_ewise3_noaccum__bclr_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix, using the
// pre-sliced tasks in B_ek_slicing.  Returns GrB_NO_VALUE when this
// operator/type combination is compiled out (GB_DISABLE).
GrB_Info GB (_Cdense_accumB__bclr_int16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.  Returns GrB_NO_VALUE
// when this operator/type combination is compiled out (GB_DISABLE).
GrB_Info GB (_Cdense_accumb__bclr_int16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable: the block above always returns (auto-generated code)
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B, with optional alpha/beta
// scalars for eWiseUnion.  The numeric work is in GB_add_template.c,
// specialized by the GB_* macros above.  Returns GrB_NO_VALUE when this
// operator/type combination is compiled out (GB_DISABLE).
GrB_Info GB (_AaddB__bclr_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    int16_t alpha_scalar ;
    int16_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // eWiseUnion substitutes these scalars for missing entries
        alpha_scalar = (*((int16_t *) alpha_scalar_in)) ;
        beta_scalar = (*((int16_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper.
// Returns GrB_NO_VALUE when this operator/type combination is compiled
// out (GB_DISABLE).
GrB_Info GB (_AemultB_08__bclr_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// GB_BITCLR is not commutative and has no flipped variant, so flipxy
// selects between fmult(x,y) and fmult(y,x) template instantiations.
// Returns GrB_NO_VALUE when this operator/type combination is compiled
// out (GB_DISABLE).
GrB_Info GB (_AemultB_02__bclr_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// Method 04: the sparse mask M drives the computation; A and B are both
// bitmap/full, so their entries are addressed directly by position.
GrB_Info GB (_AemultB_04__bclr_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Bitmap kernel: C is held as a bitmap; ewise_method selects the masked /
// complemented-mask variants inside GB_bitmap_emult_template.c.
GrB_Info GB (_AemultB_bitmap__bclr_int16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Apply z = GB_BITCLR (x, bij) to every entry of B, with the scalar x
// bound as the first operand.  Cx and Bx may be aliased.
GrB_Info GB (_bind1st__bclr_int16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped inputs
int16_t *Cx = (int16_t *) Cx_output ;
int16_t x = (*((int16_t *) x_input)) ;
int16_t *Bx = (int16_t *) Bx_input ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < bnz ; k++)
{
// compute only entries present in the bitmap Bb (if any)
if (GBB (Bb, k))
{
int16_t bij = GBX (Bx, k, false) ;
Cx [k] = GB_BITCLR (x, bij, int16_t, 16) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Apply z = GB_BITCLR (aij, y) to every entry of A, with the scalar y
// bound as the second operand.  Cx and Ax may be aliased.
GrB_Info GB (_bind2nd__bclr_int16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped inputs
int16_t *Cx = (int16_t *) Cx_output ;
int16_t *Ax = (int16_t *) Ax_input ;
int16_t y = (*((int16_t *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// compute only entries present in the bitmap Ab (if any)
if (GBB (Ab, k))
{
int16_t aij = GBX (Ax, k, false) ;
Cx [k] = GB_BITCLR (aij, y, int16_t, 16) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITCLR (x, aij, int16_t, 16) ; \
}
// Transpose A and apply z = GB_BITCLR (x, aij) with the scalar x bound as
// the first operand.  The kernel body comes from GB_unop_transpose.c and is
// driven by the GB_CAST_OP macro defined just above.
GrB_Info GB (_bind1st_tran__bclr_int16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for the code that follows (auto-generated boilerplate)
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITCLR (aij, y, int16_t, 16) ; \
}
// Transpose A and apply z = GB_BITCLR (aij, y) with the scalar y bound as
// the second operand, via the GB_unop_transpose.c template.
GrB_Info GB (_bind2nd_tran__bclr_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
jacobi-ompacc.c | #include <stdio.h>
#include <math.h>
#include <assert.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#endif
// Add timing support
#include <sys/time.h>
double time_stamp()
{
/* Wall-clock time in seconds, with microsecond resolution. */
struct timeval tv;
gettimeofday(&tv, (struct timezone *) NULL);
return (double) tv.tv_sec + 1.0e-6 * (double) tv.tv_usec;
}
double time1, time2;
void driver(void);
void initialize(void);
void jacobi(void);
void error_check(void);
/************************************************************
* program to solve a finite difference
* discretization of Helmholtz equation :
* (d2/dx2)u + (d2/dy2)u - alpha u = f
* using Jacobi iterative method.
*
* Modified: Sanjiv Shah, Kuck and Associates, Inc. (KAI), 1998
* Author: Joseph Robicheaux, Kuck and Associates, Inc. (KAI), 1998
*
* This c version program is translated by
* Chunhua Liao, University of Houston, Jan, 2005
*
* Directives are used in this code to achieve parallelism.
* All do loops are parallelized with default 'static' scheduling.
*
* Input : n - grid dimension in x direction
* m - grid dimension in y direction
* alpha - Helmholtz constant (always greater than 0.0)
* tol - error tolerance for iterative solver
* relax - Successive over-relaxation parameter
* mits - Maximum iterations for iterative solver
*
* On output
* : u(n,m) - Dependent variable (solutions)
* : f(n,m) - Right hand side function
*************************************************************/
#define REAL float // flexible between float and double
#define MSIZE 512
REAL error_ref= 9.212767E-04, resid_ref = 2.355429E-08; // depending on MSIZE!!
int n,m,mits;
REAL tol,relax=1.0,alpha=0.0543;
REAL u[MSIZE][MSIZE],f[MSIZE][MSIZE],uold[MSIZE][MSIZE];
REAL dx,dy;
int main (void)
{
// Fixed problem setup: MSIZE x MSIZE grid, tight tolerance, capped
// iteration count.  The interactive input below is intentionally disabled
// so that runs are reproducible and match the reference residuals.
// float toler;
/* printf("Input n,m (< %d) - grid dimension in x,y direction:\n",MSIZE);
scanf ("%d",&n);
scanf ("%d",&m);
printf("Input tol - error tolerance for iterative solver\n");
scanf("%f",&toler);
tol=(double)toler;
printf("Input mits - Maximum iterations for solver\n");
scanf("%d",&mits);
*/
n=MSIZE;
m=MSIZE;
tol=0.0000000001;
mits=5000;
#if 0 // Not yet support concurrent CPU and GPU threads
#ifdef _OPENMP
#pragma omp parallel
{
#pragma omp single
printf("Running using %d threads...\n",omp_get_num_threads());
}
#endif
#endif
driver ( ) ;
return 0;
}
/*************************************************************
* Subroutine driver ()
* This is where the arrays are allocated and initialized.
*
* Working variables/arrays
* dx - grid spacing in x direction
* dy - grid spacing in y direction
*************************************************************/
void driver( )
{
initialize();
// Time only the solver, not initialization or the final error check.
time1 = time_stamp();
/* Solve Helmholtz equation */
jacobi ();
time2 = time_stamp();
printf("------------------------\n");
printf("Execution time = %f\n",time2-time1);
/* error_check (n,m,alpha,dx,dy,u,f)*/
error_check ( );
}
/* subroutine initialize (n,m,alpha,dx,dy,u,f)
******************************************************
* Initializes data
* Assumes exact solution is u(x,y) = (1-x^2)*(1-y^2)
*
******************************************************/
void initialize( )
{
int i,j, xx,yy;
//double PI=3.1415926;
// Grid spacing for the domain [-1,1] x [-1,1].
dx = 2.0 / (n-1);
dy = 2.0 / (m-1);
/* Initialize initial condition and RHS */
//#pragma omp parallel for private(xx,yy,j,i)
for (i=0;i<n;i++)
for (j=0;j<m;j++)
{
// NOTE(review): xx,yy are truncated to int, so the RHS is built from
// integer-valued coordinates rather than the real positions in [-1,1].
// The reference values error_ref/resid_ref were produced with this
// same truncation, so it appears intentional for this benchmark --
// confirm before changing.
xx =(int)( -1.0 + dx * (i-1));
yy = (int)(-1.0 + dy * (j-1)) ;
u[i][j] = 0.0;
f[i][j] = -1.0*alpha *(1.0-xx*xx)*(1.0-yy*yy)\
- 2.0*(1.0-xx*xx)-2.0*(1.0-yy*yy);
}
}
/* subroutine jacobi (n,m,dx,dy,alpha,omega,u,f,tol,maxit)
******************************************************************
* Subroutine HelmholtzJ
* Solves poisson equation on rectangular grid assuming :
* (1) Uniform discretization in each direction, and
* (2) Dirichlet boundary conditions
*
* Jacobi method is used in this routine
*
* Input : n,m Number of grid points in the X/Y directions
* dx,dy Grid spacing in the X/Y directions
* alpha Helmholtz eqn. coefficient
* omega Relaxation factor
* f(n,m) Right hand side function
* u(n,m) Dependent variable/Solution
* tol Tolerance for iterative solver
* maxit Maximum number of iterations
*
* Output : u(n,m) - Solution
*****************************************************************/
void jacobi( )
{
REAL omega;
int i,j,k;
REAL error,resid,ax,ay,b;
// double error_local;
// float ta,tb,tc,td,te,ta1,ta2,tb1,tb2,tc1,tc2,td1,td2;
// float te1,te2;
// float second;
omega=relax;
/*
* Initialize coefficients */
ax = 1.0/(dx*dx); /* X-direction coef */
ay = 1.0/(dy*dy); /* Y-direction coef */
b = -2.0/(dx*dx)-2.0/(dy*dy) - alpha; /* Central coeff */
// Seed the loop so the first iteration always runs (error > tol).
error = 10.0 * tol;
k = 1;
// Each sweep: copy u into uold on the device, then update u from uold with
// the 5-point stencil, accumulating the squared residual into 'error'.
while ((k<=mits)&&(error>tol))
{
error = 0.0;
/* Copy new solution into old */
// Must split the omp for into two parallel for regions since the translation focuses on parallel to generate the outlined kernel
// We need two CUDA kernels for implementing global synchronization so we have to have two omp parallel directives!!
//#pragma omp target map(in:n, m, omega, ax, ay, u[0:n][0:m],f[0:n][0:m]) map(alloc:uold[0:n][0:m])
//#pragma omp parallel
// {
#pragma omp target map(in:n, m, u[0:n][0:m]) map(out:uold[0:n][0:m])
#pragma omp parallel for private(j,i)
for(i=0;i<n;i++)
for(j=0;j<m;j++)
uold[i][j] = u[i][j];
#pragma omp target map(in:n, m, omega, ax, ay, b, f[0:n][0:m], uold[0:n][0:m]) map(out:u[0:n][0:m])
#pragma omp parallel for private(resid,j,i) reduction(+:error) // nowait
for (i=1;i<(n-1);i++)
for (j=1;j<(m-1);j++)
{
resid = (ax*(uold[i-1][j] + uold[i+1][j])\
+ ay*(uold[i][j-1] + uold[i][j+1])+ b * uold[i][j] - f[i][j])/b;
u[i][j] = uold[i][j] - omega * resid;
error = error + resid*resid ;
}
// }
/* omp end parallel */
/* Error check */
if (k%500==0)
printf("Finished %d iteration with error =%f\n",k, error);
// Normalized residual norm used as the convergence criterion.
error = sqrt(error)/(n*m);
k = k + 1;
} /* End iteration loop */
printf("Total Number of Iterations:%d\n",k);
printf("Residual:%E\n", error);
printf("Residual_ref :%E\n", resid_ref);
printf ("Diff ref=%E\n", fabs(error-resid_ref));
// Regression guard: final residual must match the MSIZE=512 reference.
assert (fabs(error-resid_ref) < 1E-14);
}
/* subroutine error_check (n,m,alpha,dx,dy,u,f)
implicit none
************************************************************
* Checks error between numerical and exact solution
*
************************************************************/
void error_check ( )
{
// Compare the computed u against the assumed exact solution
// u(x,y) = (1-x^2)*(1-y^2) and report the normalized L2 error.
int i,j;
REAL xx,yy,temp,error;
dx = 2.0 / (n-1);
dy = 2.0 / (m-1);
error = 0.0 ;
//#pragma omp parallel for private(xx,yy,temp,j,i) reduction(+:error)
for (i=0;i<n;i++)
for (j=0;j<m;j++)
{
xx = -1.0 + dx * (i-1);
yy = -1.0 + dy * (j-1);
temp = u[i][j] - (1.0-xx*xx)*(1.0-yy*yy);
error = error + temp*temp;
}
error = sqrt(error)/(n*m);
printf("Solution Error :%E \n",error);
printf("Solution Error Ref :%E \n",error_ref);
printf ("Diff ref=%E\n", fabs(error-error_ref));
// Regression guard: solution error must match the MSIZE=512 reference.
assert (fabs(error-error_ref) < 1E-14);
}
|
GB_unaryop__minv_bool_fp64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_bool_fp64
// op(A') function: GB_tran__minv_bool_fp64
// C type: bool
// A type: double
// cast: ;
// unaryop: cij = true
#define GB_ATYPE \
double
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = true ;
// casting
#define GB_CASTING(z, aij) \
; ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_BOOL || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// For this type combination the operator degenerates to cij = true (see the
// GB_OP macro above), so Ax is read but its values are ignored by the cast.
GrB_Info GB_unop__minv_bool_fp64
(
bool *Cx, // Cx and Ax may be aliased
double *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// expands to: Cx [p] = true (via the GB_CAST_OP macro defined above)
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Phase 2 of the two-phase transpose: Rowcounts (from phase 1) gives the
// position of each entry in C; the kernel body is GB_unaryop_transpose.c.
GrB_Info GB_tran__minv_bool_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
smul.c | /*
This file is part of ParTI!.
ParTI! is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
ParTI! is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with ParTI!.
If not, see <http://www.gnu.org/licenses/>.
*/
#include <HiParTI.h>
int ptiSparseTensorMulScalar(ptiSparseTensor *X, ptiValue const a) {
    /* Multiply every stored value of the sparse tensor X by the scalar a,
     * in place.  Always returns 0. */
    if(a == 0) {
        /* X * 0 == 0: every nonzero vanishes, so drop all stored entries
         * instead of scaling them. */
        X->nnz = 0;
        X->values.len = 0;
        return 0;
    }
    ptiNnzIndex i;
#ifdef HIPARTI_USE_OPENMP
#pragma omp parallel for schedule(static)
#endif
    for(i = 0; i < X->nnz; ++i) {
        X->values.data[i] *= a;
    }
    return 0;
}
|
SPHCalcDensityFunctor.h | /**
* @file SPHCalcDensityFunctor.h
* @author seckler
* @date 19.01.18
*/
#pragma once
#include "autopas/pairwiseFunctors/Functor.h"
#include "autopas/particles/OwnershipState.h"
#include "autopas/sph/SPHKernels.h"
namespace autopas::sph {
/**
* Class that defines the density functor.
* It is used to calculate the density based on the given SPH kernel.
* @tparam Particle
* @tparam ParticleCell
*/
template <class Particle>
class SPHCalcDensityFunctor : public Functor<Particle, SPHCalcDensityFunctor<Particle>> {
public:
/// soa arrays type
using SoAArraysType = typename Particle::SoAArraysType;
/// Constructor; the base Functor is constructed with a cutoff of 0.
SPHCalcDensityFunctor() : autopas::Functor<Particle, SPHCalcDensityFunctor<Particle>>(0.){};
/// This functor takes part in auto-tuning.
bool isRelevantForTuning() override { return true; }
/// Both newton3 and non-newton3 traversals are supported.
bool allowsNewton3() override { return true; }
bool allowsNonNewton3() override { return true; }
[[nodiscard]] bool isAppropriateClusterSize(unsigned int clusterSize,
DataLayoutOption::Value dataLayout) const override {
return dataLayout == DataLayoutOption::aos; // This functor does only support clusters via aos.
}
/**
* Calculates the density contribution of the interaction of particle i and j.
* It is not symmetric, because the smoothing lengths of the two particles can
* be different.
* @param i first particle of the interaction
* @param j second particle of the interaction
* @param newton3 defines whether or whether not to use newton 3
*/
inline void AoSFunctor(Particle &i, Particle &j, bool newton3 = true) override {
if (i.isDummy() or j.isDummy()) {
return;
}
const std::array<double, 3> dr = utils::ArrayMath::sub(j.getR(), i.getR()); // ep_j[j].pos - ep_i[i].pos;
const double density =
j.getMass() * SPHKernels::W(dr, i.getSmoothingLength()); // ep_j[j].mass * W(dr, ep_i[i].smth)
i.addDensity(density);
if (newton3) {
// Newton 3:
// W is symmetric in dr, so no -dr needed, i.e. we can reuse dr
const double density2 = i.getMass() * SPHKernels::W(dr, j.getSmoothingLength());
j.addDensity(density2);
}
}
/**
* Get the number of floating point operations used in one full kernel call
* @return the number of floating point operations
*/
static unsigned long getNumFlopsPerKernelCall() {
unsigned long flops = 0;
flops += 3; // calculating dr
flops += 2 * SPHKernels::getFlopsW(); // flops for calling W
flops += 2 * 1; // calculating density
flops += 2 * 1; // adding density
return flops;
}
/**
* @copydoc Functor::SoAFunctorSingle(SoAView<SoAArraysType>, bool)
* This functor ignores the newton3 value, as we do not expect any benefit from disabling newton3.
*/
void SoAFunctorSingle(SoAView<SoAArraysType> soa, bool newton3) override {
if (soa.getNumParticles() == 0) return;
double *const __restrict xptr = soa.template begin<Particle::AttributeNames::posX>();
double *const __restrict yptr = soa.template begin<Particle::AttributeNames::posY>();
double *const __restrict zptr = soa.template begin<Particle::AttributeNames::posZ>();
double *const __restrict densityptr = soa.template begin<Particle::AttributeNames::density>();
double *const __restrict smthptr = soa.template begin<Particle::AttributeNames::smth>();
double *const __restrict massptr = soa.template begin<Particle::AttributeNames::mass>();
const auto *const __restrict ownedStatePtr = soa.template begin<Particle::AttributeNames::ownershipState>();
size_t numParticles = soa.getNumParticles();
// Loop over unique pairs (j > i); both directions are always applied, so
// the newton3 flag is irrelevant here.
for (unsigned int i = 0; i < numParticles; ++i) {
// skip dummy particles
if (ownedStatePtr[i] == OwnershipState::dummy) {
continue;
}
double densacc = 0.;
// icpc vectorizes this.
// g++ only with -ffast-math or -funsafe-math-optimizations
#pragma omp simd reduction(+ : densacc)
for (unsigned int j = i + 1; j < numParticles; ++j) {
const double drx = xptr[i] - xptr[j];
const double dry = yptr[i] - yptr[j];
const double drz = zptr[i] - zptr[j];
const double drx2 = drx * drx;
const double dry2 = dry * dry;
const double drz2 = drz * drz;
const double dr2 = drx2 + dry2 + drz2;
// if second particle is a dummy, we skip the interaction.
const bool mask = ownedStatePtr[j] != OwnershipState::dummy;
const double density = mask ? massptr[j] * SPHKernels::W(dr2, smthptr[i]) : 0.;
densacc += density;
// Newton 3:
// W is symmetric in dr, so no -dr needed, i.e. we can reuse dr
const double density2 = mask ? massptr[i] * SPHKernels::W(dr2, smthptr[j]) : 0.;
densityptr[j] += density2;
}
densityptr[i] += densacc;
}
}
/**
* @copydoc Functor::SoAFunctorPair(SoAView<SoAArraysType>, SoAView<SoAArraysType>, bool)
*/
void SoAFunctorPair(SoAView<SoAArraysType> soa1, SoAView<SoAArraysType> soa2, bool newton3) override {
if (soa1.getNumParticles() == 0 || soa2.getNumParticles() == 0) return;
double *const __restrict xptr1 = soa1.template begin<Particle::AttributeNames::posX>();
double *const __restrict yptr1 = soa1.template begin<Particle::AttributeNames::posY>();
double *const __restrict zptr1 = soa1.template begin<Particle::AttributeNames::posZ>();
double *const __restrict densityptr1 = soa1.template begin<Particle::AttributeNames::density>();
double *const __restrict smthptr1 = soa1.template begin<Particle::AttributeNames::smth>();
double *const __restrict massptr1 = soa1.template begin<Particle::AttributeNames::mass>();
double *const __restrict xptr2 = soa2.template begin<Particle::AttributeNames::posX>();
double *const __restrict yptr2 = soa2.template begin<Particle::AttributeNames::posY>();
double *const __restrict zptr2 = soa2.template begin<Particle::AttributeNames::posZ>();
double *const __restrict densityptr2 = soa2.template begin<Particle::AttributeNames::density>();
double *const __restrict smthptr2 = soa2.template begin<Particle::AttributeNames::smth>();
double *const __restrict massptr2 = soa2.template begin<Particle::AttributeNames::mass>();
const auto *const __restrict ownedStatePtr1 = soa1.template begin<Particle::AttributeNames::ownershipState>();
const auto *const __restrict ownedStatePtr2 = soa2.template begin<Particle::AttributeNames::ownershipState>();
size_t numParticlesi = soa1.getNumParticles();
for (unsigned int i = 0; i < numParticlesi; ++i) {
// skip dummy particles
if (ownedStatePtr1[i] == OwnershipState::dummy) {
continue;
}
double densacc = 0.;
size_t numParticlesj = soa2.getNumParticles();
// icpc vectorizes this.
// g++ only with -ffast-math or -funsafe-math-optimizations
#pragma omp simd reduction(+ : densacc)
for (unsigned int j = 0; j < numParticlesj; ++j) {
const double drx = xptr1[i] - xptr2[j];
const double dry = yptr1[i] - yptr2[j];
const double drz = zptr1[i] - zptr2[j];
const double drx2 = drx * drx;
const double dry2 = dry * dry;
const double drz2 = drz * drz;
const double dr2 = drx2 + dry2 + drz2;
// if second particle is a dummy, we skip the interaction.
const bool mask = ownedStatePtr2[j] != OwnershipState::dummy;
const double density = mask ? massptr2[j] * SPHKernels::W(dr2, smthptr1[i]) : 0.;
densacc += density;
if (newton3) {
// Newton 3:
// W is symmetric in dr, so no -dr needed, i.e. we can reuse dr
const double density2 = mask ? massptr1[i] * SPHKernels::W(dr2, smthptr2[j]) : 0.;
densityptr2[j] += density2;
}
}
densityptr1[i] += densacc;
}
}
// clang-format off
/**
* @copydoc Functor::SoAFunctorVerlet(SoAView<SoAArraysType> soa, const size_t indexFirst, const std::vector<size_t, autopas::AlignedAllocator<size_t>> &neighborList, bool newton3)
*/
// clang-format on
void SoAFunctorVerlet(SoAView<SoAArraysType> soa, const size_t indexFirst,
const std::vector<size_t, autopas::AlignedAllocator<size_t>> &neighborList,
bool newton3) override {
if (soa.getNumParticles() == 0) return;
const auto *const __restrict ownedStatePtr = soa.template begin<Particle::AttributeNames::ownershipState>();
// skip dummy particles
if (ownedStatePtr[indexFirst] == OwnershipState::dummy) {
return;
}
double *const __restrict xptr = soa.template begin<Particle::AttributeNames::posX>();
double *const __restrict yptr = soa.template begin<Particle::AttributeNames::posY>();
double *const __restrict zptr = soa.template begin<Particle::AttributeNames::posZ>();
double *const __restrict densityptr = soa.template begin<Particle::AttributeNames::density>();
double *const __restrict smthptr = soa.template begin<Particle::AttributeNames::smth>();
double *const __restrict massptr = soa.template begin<Particle::AttributeNames::mass>();
double densacc = 0;
// local alias (fixed: "&current" had been corrupted to a "&curren;" entity)
const auto &currentList = neighborList;
size_t listSize = currentList.size();
// icpc vectorizes this.
// g++ only with -ffast-math or -funsafe-math-optimizations
#pragma omp simd reduction(+ : densacc)
for (unsigned int j = 0; j < listSize; ++j) {
const double drx = xptr[indexFirst] - xptr[currentList[j]];
const double dry = yptr[indexFirst] - yptr[currentList[j]];
const double drz = zptr[indexFirst] - zptr[currentList[j]];
const double drx2 = drx * drx;
const double dry2 = dry * dry;
const double drz2 = drz * drz;
const double dr2 = drx2 + dry2 + drz2;
// if second particle is a dummy, we skip the interaction.
const bool mask = ownedStatePtr[currentList[j]] != OwnershipState::dummy;
const double density = mask ? massptr[currentList[j]] * SPHKernels::W(dr2, smthptr[indexFirst]) : 0.;
densacc += density;
if (newton3) {
// Newton 3:
// W is symmetric in dr, so no -dr needed, i.e. we can reuse dr
const double density2 = mask ? massptr[indexFirst] * SPHKernels::W(dr2, smthptr[currentList[j]]) : 0.;
densityptr[currentList[j]] += density2;
}
}
densityptr[indexFirst] += densacc;
}
/**
* @copydoc Functor::getNeededAttr()
*/
constexpr static auto getNeededAttr() {
return std::array<typename Particle::AttributeNames, 7>{
Particle::AttributeNames::mass, Particle::AttributeNames::posX, Particle::AttributeNames::posY,
Particle::AttributeNames::posZ, Particle::AttributeNames::smth, Particle::AttributeNames::density,
Particle::AttributeNames::ownershipState};
}
/**
* @copydoc Functor::getNeededAttr(std::false_type)
*/
constexpr static auto getNeededAttr(std::false_type) {
return std::array<typename Particle::AttributeNames, 6>{
Particle::AttributeNames::mass, Particle::AttributeNames::posX, Particle::AttributeNames::posY,
Particle::AttributeNames::posZ, Particle::AttributeNames::smth, Particle::AttributeNames::ownershipState};
}
/**
* @copydoc Functor::getComputedAttr()
*/
constexpr static auto getComputedAttr() {
return std::array<typename Particle::AttributeNames, 1>{Particle::AttributeNames::density};
}
};
} // namespace autopas::sph
|
XSHA_fmt_plug.c | /*
* This file is part of John the Ripper password cracker,
* Copyright (c) 2008,2011 by Solar Designer
*
* Intrinsics support added by magnum 2011.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_XSHA;
#elif FMT_REGISTERS_H
john_register_one(&fmt_XSHA);
#else
#include <string.h>
#include "arch.h"
#ifdef SIMD_COEF_32
#define NBKEYS (SIMD_COEF_32 * SIMD_PARA_SHA1)
#ifdef _OPENMP
static unsigned int omp_t = 1;
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 128
#endif
#endif
#endif
#include "simd-intrinsics.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#include "sha.h"
#include "johnswap.h"
#include "memdbg.h"
#define FORMAT_LABEL "xsha"
#define FORMAT_NAME "Mac OS X 10.4 - 10.6"
#define ALGORITHM_NAME "SHA1 " SHA1_ALGORITHM_NAME
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 51
#define CIPHERTEXT_LENGTH 48
#define BINARY_SIZE 20
#define BINARY_ALIGN 4
#define SALT_SIZE 4
#define SALT_ALIGN 4
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT NBKEYS
#define MAX_KEYS_PER_CRYPT NBKEYS
#define FMT_IS_BE
#include "common-simd-getpos.h"
#else
#define MIN_KEYS_PER_CRYPT 1
#ifdef _OPENMP
#define MAX_KEYS_PER_CRYPT (0x200 * 3)
#else
#define MAX_KEYS_PER_CRYPT 0x100
#endif
#endif
/* Self-test vectors: 8 uppercase-hex salt chars followed by 40 uppercase-hex
 * SHA-1 digest chars, paired with the plaintext password. */
static struct fmt_tests tests[] = {
{"12345678F9083C7F66F46A0A102E4CC17EC08C8AF120571B", "abc"},
{"12345678EB8844BFAF2A8CBDD587A37EF8D4A290680D5818", "azertyuiop1"},
{"3234C32AAA335FD20E3F95870E5851BDBE942B79CE4FDD92", "azertyuiop2"},
{"01295B67659E95F32931CEDB3BA50289E2826AF3D5A1422F", "apple"},
{"0E6A48F765D0FFFFF6247FA80D748E615F91DD0C7431E4D9", "macintosh"},
{"A320163F1E6DB42C3949F7E232888ACC7DB7A0A17E493DBA", "test"},
{"743777471285CB3566886D4821D556E475E0DF9234308B22", "123"},
{"474379622BD7B9F84BD6E4BB52ABF9D01705EFB0A2426655", "passWOrd"},
{"597A523666A10C534495DB6333CF7EBA70C1A578CADE11A3", ""},
{NULL}
};
#ifdef SIMD_COEF_32
/* SIMD path: flat, SIMD-aligned key/digest buffers allocated in init(),
 * plus the current 4-byte salt injected by crypt_all(). */
static uint32_t (*saved_key);
static uint32_t (*crypt_key);
static uint32_t cur_salt;
#else
/* Scalar path: fixed per-candidate buffers and a pre-salted SHA-1 context. */
static char saved_key[MAX_KEYS_PER_CRYPT][PLAINTEXT_LENGTH + 1];
static int saved_len[MAX_KEYS_PER_CRYPT];
static SHA_CTX ctx_salt;
static uint32_t crypt_out[MAX_KEYS_PER_CRYPT][5];
#endif
/* Format init: size the key batch to the thread count and allocate the
 * SIMD-aligned buffers (no-op in the scalar build, which uses static arrays). */
static void init(struct fmt_main *self)
{
#ifdef SIMD_COEF_32
#if defined (_OPENMP)
/* scale batch size by thread count (times OMP_SCALE for load balancing) */
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt = omp_t * NBKEYS;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt = omp_t * NBKEYS;
#endif
saved_key = mem_calloc_align(self->params.max_keys_per_crypt,
SHA_BUF_SIZ * 4, MEM_ALIGN_SIMD);
crypt_key = mem_calloc_align(self->params.max_keys_per_crypt,
BINARY_SIZE, MEM_ALIGN_SIMD);
#endif
}
/* Release the buffers allocated in init() (no-op in the scalar build). */
static void done(void)
{
#ifdef SIMD_COEF_32
MEM_FREE(crypt_key);
MEM_FREE(saved_key);
#endif
}
/* A valid ciphertext is exactly CIPHERTEXT_LENGTH uppercase hex digits.
 * atoi16 maps hex chars to values (0x7F = not hex); the "*pos < 'a'" test
 * additionally rejects lowercase a-f, so only 0-9 and A-F pass. */
static int valid(char *ciphertext, struct fmt_main *self)
{
char *pos;
/* Require uppercase hex digits (assume ASCII) */
pos = ciphertext;
while (atoi16[ARCH_INDEX(*pos)] != 0x7F && *pos < 'a')
pos++;
return !*pos && pos - ciphertext == CIPHERTEXT_LENGTH;
}
/* Decode the 40 hex digest chars (after the 8-char salt prefix) into the
 * BINARY_SIZE-byte target hash.  Returns a static buffer per format API. */
static void *get_binary(char *ciphertext)
{
static unsigned char *out;
char *p;
int i;
if (!out)
out = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD);
p = ciphertext + 8;
for (i = 0; i < BINARY_SIZE; i++) {
out[i] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
#if defined(SIMD_COEF_32) && ARCH_LITTLE_ENDIAN==1
/* the SIMD code works on big-endian words; swap on little-endian hosts */
alter_endianity(out, BINARY_SIZE);
#endif
return out;
}
/* Decode the leading 8 hex chars of the ciphertext into the 4-byte salt.
 * Returns a static buffer per format API. */
static void *get_salt(char *ciphertext)
{
static unsigned int outbuf[SALT_SIZE / sizeof(int)];
unsigned char *out = (unsigned char*)outbuf;
char *p;
int i;
p = ciphertext;
for (i = 0; i < SALT_SIZE; i++) {
out[i] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
#if defined(SIMD_COEF_32) && ARCH_LITTLE_ENDIAN==1
/* keep the salt in the same byte order the SIMD hashing code expects */
alter_endianity(out, SALT_SIZE);
#endif
return out;
}
#define COMMON_GET_HASH_SIMD32 5
#define COMMON_GET_HASH_VAR crypt_out
#define COMMON_GET_HASH_SIMD_VAR crypt_key
#include "common-get-hash.h"
/* Bucket a salt by the low bits of its 4-byte value. */
static int salt_hash(void *salt)
{
return *(uint32_t *)salt & (SALT_HASH_SIZE - 1);
}
static void set_salt(void *salt)
{
#ifdef SIMD_COEF_32
/* SIMD path: remember the raw salt word; crypt_all() injects it per key */
cur_salt = *(uint32_t*)salt;
#else
/* Scalar path: pre-hash the salt once; crypt_all() clones this context */
SHA1_Init(&ctx_salt);
SHA1_Update(&ctx_salt, salt, SALT_SIZE);
#endif
}
#define SALT_PREPENDED SALT_SIZE
#define NON_SIMD_SET_SAVED_LEN
#include "common-simd-setkey32.h"
/* Compute SHA-1(salt || key) for all queued candidates; returns count. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
#ifdef SIMD_COEF_32
int i = 0;
#if defined(_OPENMP)
#pragma omp parallel for
for (i=0; i < omp_t; i++) {
#endif
/* Each i handles one SIMD batch of NBKEYS candidates: write the salt
 * word at the front of every interleaved key buffer, then run one
 * interleaved SHA-1 over the whole batch. */
unsigned int *in = &saved_key[i*NBKEYS*SHA_BUF_SIZ];
unsigned int *out = &crypt_key[i*NBKEYS*BINARY_SIZE/4];
unsigned int j;
for (j=0; j < NBKEYS; j++)
in[(j&(SIMD_COEF_32-1)) + j/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32] = cur_salt;
SIMDSHA1body(in, out, NULL, SSEi_MIXED_IN);
#if defined(_OPENMP)
}
#endif
#else
int i;
#ifdef _OPENMP
#pragma omp parallel for default(none) private(i) shared(ctx_salt, saved_key, saved_len, crypt_out)
#endif
for (i = 0; i < count; i++) {
/* clone the pre-salted context, append the key, finalize */
SHA_CTX ctx;
memcpy(&ctx, &ctx_salt, sizeof(ctx));
SHA1_Update(&ctx, saved_key[i], saved_len[i]);
SHA1_Final((unsigned char *)(crypt_out[i]), &ctx);
}
#endif
return count;
}
/* Quick scan: does the first 32-bit word of any computed digest match the
 * first word of the target binary?  Full comparison happens in cmp_one(). */
static int cmp_all(void *binary, int count)
{
#ifdef SIMD_COEF_32
unsigned int x,y=0;
#ifdef _OPENMP
for (;y<SIMD_PARA_SHA1*omp_t;y++)
#else
for (;y<SIMD_PARA_SHA1;y++)
#endif
for (x=0;x<SIMD_COEF_32;x++)
{
/* digests are interleaved: lane x of SIMD block y, 5 words per hash */
if ( ((uint32_t *)binary)[0] == ((uint32_t *)crypt_key)[x+y*SIMD_COEF_32*5] )
return 1;
}
return 0;
#else
uint32_t b0 = *(uint32_t *)binary;
int i;
for (i = 0; i < count; i++) {
if (b0 != crypt_out[i][0])
continue;
if (!memcmp(binary, crypt_out[i], BINARY_SIZE))
return 1;
}
return 0;
#endif
}
/* Compare all five 32-bit digest words of candidate 'index' to the target. */
static int cmp_one(void *binary, int index)
{
#ifdef SIMD_COEF_32
unsigned int x,y;
/* locate lane x within SIMD block y for this candidate index */
x = index&(SIMD_COEF_32-1);
y = (unsigned int)index/SIMD_COEF_32;
if ( ((uint32_t *)binary)[0] != ((uint32_t *)crypt_key)[x+y*SIMD_COEF_32*5] )
return 0;
if ( ((uint32_t *)binary)[1] != ((uint32_t *)crypt_key)[x+y*SIMD_COEF_32*5+SIMD_COEF_32] )
return 0;
if ( ((uint32_t *)binary)[2] != ((uint32_t *)crypt_key)[x+y*SIMD_COEF_32*5+2*SIMD_COEF_32] )
return 0;
if ( ((uint32_t *)binary)[3] != ((uint32_t *)crypt_key)[x+y*SIMD_COEF_32*5+3*SIMD_COEF_32] )
return 0;
if ( ((uint32_t *)binary)[4] != ((uint32_t *)crypt_key)[x+y*SIMD_COEF_32*5+4*SIMD_COEF_32] )
return 0;
return 1;
#else
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
#endif
}
static int cmp_exact(char *source, int index)
{
return 1;
}
/*
 * Format registration record consumed by the John the Ripper core.
 * First brace group: format parameters (labels, sizes, limits, flags,
 * test vectors).  Second group: method table (lifecycle, parsing,
 * hashing and comparison hooks).
 */
struct fmt_main fmt_XSHA = {
	{
		/* --- format parameters --- */
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		/* OpenMP-capable (FMT_OMP_BAD: poor OMP scaling), case-sensitive, 8-bit clean */
		FMT_OMP | FMT_OMP_BAD | FMT_CASE | FMT_8_BIT,
		{ NULL },
		{ NULL },
		tests
	}, {
		/* --- method table --- */
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			/* binary hash functions (hash-table bucketing of binaries) */
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			/* get_hash functions shared via the common header */
#define COMMON_GET_HASH_LINK
#include "common-get-hash.h"
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
|
SymbolicDerivatives.h | #ifndef _SymbolicDerivatives_H_
#define _SymbolicDerivatives_H_
// If true, this directive require CERES installed, then it allows a comparison with CERES/Jet (for a potential publication) ..
#define MMVII_WITH_CERES false
#ifdef _OPENMP
#include <omp.h>
#endif
// #define SYMBDER_WITH_MMVII true
#define SYMBDER_WITH_EIGEN false
#if SYMBDER_WITH_EIGEN
#include "ExternalInclude/Eigen/Dense" // TODO => replace with standard eigen file
#define EIGEN_ALLIGNMENT_IN_MMVII EIGEN_MAKE_ALIGNED_OPERATOR_NEW
#else
#define EIGEN_ALLIGNMENT_IN_MMVII
#endif
/*
*/
/** \file SymbolicDerivatives.h
    \brief File for generating symbolic derivatives

    Classes for generating symbolic derivatives. All classes are single template classes.
    The template parameter indicates the numerical type used for storage/computation
    (float, double ...)
This file is the only file to include. It contains :
* declaration of operators
* definition of "main" classes : cFormula , cCoordinatorF , cImplemF " ;
* the 3 class for Atomic formula who will (probably) stay the same : Unkown, Observation, Constants
This file include 2 files corresponding to following type of formula :
* classes for "unary" formulas in "MMVII_FormDer_UnaryOp.h"
* classes for "binary" formulas in "MMVII_FormDer_BinaryOp.h"
These 2 files have "vocation" to be extended during the future.
-------------------------------------------------
* cFormula<Type> : represent a mathematicall formula; as in math :
- if F is a formula, exp(F), log(F) ....are formulas
- if F1 and F2 are formulas, F1+F2 , F1*F2 ... are formulas
- there exist some atomic formulas like constants, unknown and observations
- if F is a formula F->Derivate(k) is a formula corresponding to is derivate dF/dXk
Formulas are a complete algebric type.
* cCoordinatorF<Type> : is the "coordinator" class.
This class has, between others, the responsability of :
- creating the initial atomic formula corresponding to unknowns and observation
- maintain an inventory of existing formulas for efficiency purpose
* Using this library is mainly :
- create a coordinator with a given number of unknowns and observations
- create a formula using atoms an operator, generally the user function creating a
formula will be a template that can operate on any complete algebric type
(double, float, Formula , jets ...)
- indicate to the coordinator the formula you want work on, with generally its derivate
- evaluate the values of the formula for given unknows and observations
cFormula<Type> is no more than an encapsulation of a pointer on the "concrete" class cImplemF.
* cImplemF<Type> : is the mother class of all the formula. It's a pure abstract class, it contains
several pure virtual methods. The two main methods are "Derivate" and "ComputeBuf", this is
the two methods the users will have to define when extension to the library with new
operator is required.
- cFormula<Type> Derivate(int aK) return the formula of its derivate by Xk. Heres is
two example extract from the code, one for multiplication, other from unknowns :
o return mF2*mF1->Derivate(aK) + mF1*mF2->Derivate(aK); // From cMulF : (FG)' = F'G + FG'
o return (aK==mNumUnk) ? tImplemF::mCoordF->Cste1() : tImplemF::mCoordF->Cste0(); // from cUnknownF
- void ComputeBuf(int aK0,int aK1) : update the buffer of its data, once it subformula has
been updated, this is method that does the real job. Here an extract from cExpF and cDivF :
o for (int aK=aK0 ; aK<aK1 ; aK++) mDataBuf[aK] = std::exp(mDataF[aK]);
o for (int aK=aK0 ; aK<aK1 ; aK++) mDataBuf[aK] = mDataF1[aK] / mDataF2[aK];
*/
#include "SymbDer_Common.h"
/*
#if (SYMBDER_WITH_MMVII)
#include "include/MMVII_all.h"
#include "include/MMVII_Derivatives.h"
#define SYMBDER_cMemCheck MMVII::cMemCheck
#else //========================================================== WITH_MMVI
class SYMBDER_cMemCheck
{
};
#endif
*/
#if (SYMBDER_WITH_MMVII)
#else
#include <memory>
#include <map>
#include <iostream>
#include <cassert>
#include "memory.h"
#include <memory>
#include <iostream>
#include <fstream>
#include <string>
#include <typeinfo>
#include <vector>
#include <list>
#include <map>
#include <ctime>
#include <chrono>
#include <math.h>
#include <cmath>
#include <algorithm>
#include <sstream>
#include <iomanip>
#endif //========================================================== WITH_MMVI
// REDUCTION RULES
// TODO => REPLACE BY METHOD ON COORDINATOR WHEN THEY IMPROVE THINGS ....
#define DOREDUCE false
#define REDUCE_CSTE true // Cste+Cste => cste
#define REDUCE_MM DOREDUCE // - - x => x ; a-(-b) => a+b
#define REDUCE_ASSOCP DOREDUCE /* B + (A + C) = > A + ( B + C),
more generally order the + operator, could be done with '*' */
#define REDUCE_DISTRIB DOREDUCE // A#B ~ A#C=> A#(B~C) ; # in "*/" and ~ in "+-"
#define REDUCE_ApA DOREDUCE // A+A => 2*A, not good by itself, but may creat other reduc
#define REDUCE_DIST1 DOREDUCE // A + A*C => A *(1+C) si C est csteto have all constant close
/// Debug hook called on each reduction; no-op by default, uncomment the trailing body to trace.
static inline void SHOW_REDUCE(const std::string & aMes) {} // std::cout << "REDUCE " << aMes << "\n";}
namespace NS_SymbolicDerivative
{
/* *************************************************** */
/* */
/* P0-Definition of global functions */
/* */
/* *************************************************** */
/// Numeric counterpart of the formula-level CreateCste : generic code must be
/// able to "create a constant" from any algebraic type, and for a plain
/// number the constant is simply the value itself (second argument ignored).
template <class Type> inline Type CreateCste(const Type & aValue, const Type &)
{
    return aValue;
}
/// Integer-exponent pow on numeric types.  Provided because pow is defined
/// in std:: with floating arguments; the implicit int->float cast would make
/// it inaccessible to generic code that must also work on formulas.
template <class Type> inline Type pow(const Type & aBase, const int & aExp)
{
    const Type aFloatExp = Type(aExp);
    return std::pow(aBase, aFloatExp);
}
//============= BASIC ERROR HANDLING ==============
/** Central finite-difference approximation of the derivative of aFctr with
    respect to unknown number aNumVar, evaluated at (aVUk, aVObs).
    Used in the tests to cross-check the symbolic derivatives, and in the
    didactic parts of the library.

    @param aFctr    function mapping (unknowns, observations) -> vector of values
    @param aVUk     unknowns at which the derivative is taken
    @param aVObs    observations (passed through unchanged)
    @param aNumVar  index of the unknown we derivate by
    @param aEpsilon "small" step used for the variation
    @return component-wise (f(x+e) - f(x-e)) / (2e)
*/
template <class Type,class TypeFct>
std::vector<Type> NumericalDerivate
                  (
                       TypeFct & aFctr,
                       const std::vector<Type> & aVUk,
                       const std::vector<Type> & aVObs,
                       int aNumVar,
                       const Type & aEpsilon
                  )
{
    // Shift the selected unknown by +/- epsilon
    std::vector<Type> aUkPlus(aVUk);
    std::vector<Type> aUkMinus(aVUk);
    aUkPlus.at(aNumVar)  += aEpsilon;
    aUkMinus.at(aNumVar) -= aEpsilon;

    // Evaluate at the two shifted points ("plus" first, as before)
    const std::vector<Type> aValPlus  = aFctr(aUkPlus ,aVObs);
    const std::vector<Type> aValMinus = aFctr(aUkMinus,aVObs);

    // Central difference, component by component
    std::vector<Type> aResult;
    aResult.reserve(aValPlus.size());
    for (size_t aK=0 ; aK<aValPlus.size() ; aK++)
        aResult.push_back((aValPlus.at(aK)-aValMinus.at(aK)) / (2*aEpsilon));

    return aResult;
}
/* *************************************************** */
/* *************************************************** */
/* * * */
/* *            Main user interface             * */
/* * * */
/* *************************************************** */
/* *************************************************** */
// ------------- The two classes visible by user are cFormula and cCoordinatorF ------
/** Abstraction of mathemicall formula, this the object manipulated by user, its
has all algerbric operation required. This object is just an encapsulation of
a pointer on cImplemF.
*/
template <class TypeElem> class cFormula ;
/** Class for managing the "context", i.e. coordinating all the formula
and their derivative corresponding to a single use .
A coordinator is also a calculator, as it make computation on formulas
*/
template <class TypeElem> class cCoordinatorF;
// -------- Declaration of all binary operators ----------------
// For each operator we have the 3 versions "Formula x Formula" ,
// "Number x Formula" and "Formula x Number" ; the two last are rather
// syntactic sugar (i.e. they make usage easier, but do not extend the library's power)
// Operator +
template <class TypeElem> cFormula <TypeElem>
operator +(const cFormula <TypeElem> & aF1 ,const cFormula <TypeElem> & aF2);
template <class TypeElem> cFormula <TypeElem> operator +(const TypeElem & aV1,const cFormula <TypeElem> & aF2);
template <class TypeElem> cFormula <TypeElem> operator +(const cFormula <TypeElem> & aF1,const TypeElem & aV2);
// Operator *
template <class TypeElem> cFormula <TypeElem>
operator *(const cFormula <TypeElem> & aF1 ,const cFormula <TypeElem> & aF2);
template <class TypeElem> cFormula <TypeElem> operator *(const TypeElem & aV1,const cFormula <TypeElem> & aF2);
template <class TypeElem> cFormula <TypeElem> operator *(const cFormula <TypeElem> & aF1,const TypeElem & aV2);
// Operator -
template <class TypeElem> cFormula <TypeElem>
operator -(const cFormula <TypeElem> & aF1 ,const cFormula <TypeElem> & aF2);
template <class TypeElem> cFormula <TypeElem> operator -(const TypeElem & aV1,const cFormula <TypeElem> & aF2);
template <class TypeElem> cFormula <TypeElem> operator -(const cFormula <TypeElem> & aF1,const TypeElem & aV2);
// Operator /
template <class TypeElem> cFormula <TypeElem>
operator /(const cFormula <TypeElem> & aF1 ,const cFormula <TypeElem> & aF2);
template <class TypeElem> cFormula <TypeElem> operator /(const TypeElem & aV1,const cFormula <TypeElem> & aF2);
template <class TypeElem> cFormula <TypeElem> operator /(const cFormula <TypeElem> & aF1,const TypeElem & aV2);
// pow
template <class TypeElem> cFormula <TypeElem>
pow (const cFormula <TypeElem> & aF1 ,const cFormula <TypeElem> & aF2);
template <class TypeElem> cFormula <TypeElem> pow (const TypeElem & aV1,const cFormula <TypeElem> & aF2);
/// This one defined in MMVII_FormDer_UnaryOp.h
template <class TypeElem> cFormula <TypeElem> pow (const cFormula <TypeElem> & aF1,const TypeElem & aV2);
template <class TypeElem> cFormula <TypeElem> pow (const cFormula <TypeElem> & aF1,const int & aV2);
// -------- integer low power ----------------
template <class TypeElem> cFormula <TypeElem> square(const cFormula <TypeElem> & aF);
template <class TypeElem> cFormula <TypeElem> cube(const cFormula <TypeElem> & aF);
template <class TypeElem> cFormula <TypeElem> pow4(const cFormula <TypeElem> & aF);
template <class TypeElem> cFormula <TypeElem> pow5(const cFormula <TypeElem> & aF);
template <class TypeElem> cFormula <TypeElem> pow6(const cFormula <TypeElem> & aF);
template <class TypeElem> cFormula <TypeElem> pow7(const cFormula <TypeElem> & aF);
template <class TypeElem> cFormula <TypeElem> pow8(const cFormula <TypeElem> & aF);
template <class TypeElem> cFormula <TypeElem> pow9(const cFormula <TypeElem> & aF);
// --- other unary operator
template <class TypeElem> cFormula <TypeElem> exp(const cFormula <TypeElem> & aF);
template <class TypeElem> cFormula <TypeElem> operator - (const cFormula <TypeElem> & aF);
template <class TypeElem> cFormula <TypeElem> log(const cFormula <TypeElem> & aF);
// ---- sometimes we need a templatized way to create constants
template <class T> cFormula<T> CreateCste(const T & aV,const cFormula<T> & aF);
/// powI : power with an integral exponent.  Small exponents (0..9) are
/// dispatched to the dedicated functions (square, cube, pow4...), which on
/// formulas build much cheaper expressions than a generic pow node.
template <class Type> Type powI(const Type & aV,const int & aExp)
{
    switch (aExp)
    {
        // case 0 : return Type(1.0);
        case 0 : return CreateCste(1.0,aV);  // neutral element, as a constant of the right kind
        case 1 : return aV;
        case 2 : return square(aV);
        case 3 : return cube(aV);
        case 4 : return pow4(aV);
        case 5 : return pow5(aV);
        case 6 : return pow6(aV);
        case 7 : return pow7(aV);
        case 8 : return pow8(aV);
        case 9 : return pow9(aV);
        default : return pow(aV,aExp);  // exponent outside the fast range
    }
}
// -------- Declaration of Coordinator class ----------------

/** The coordinator is the central object of the library : it owns every
    formula (unknowns, observations, constants and derived expressions),
    guarantees sharing of identical sub-formulas, evaluates the current
    formulas on buffered inputs, and can generate C++ code for them. */
template <class TypeElem> class cCoordinatorF : public cCalculator<TypeElem> // , public SYMBDER_cMemCheck
{
    public :
        typedef cFormula <TypeElem>   tFormula;
        typedef std::vector<TypeElem> tOneRes;

        // --------------------------- Constructors / Destructor -------------------
        /// Constructor with explicit Id for Unknown/Observation. Used if we want to analyze the generated code
        inline cCoordinatorF(const std::string &aName, int SzBuf, const std::vector<std::string> & aVecUK, const std::vector<std::string> & aVecObs);
        /// Constructor with basic Id (used if we dont generate code, or dont want to analyse it by human)
        inline cCoordinatorF(const std::string &aName, int SzBuf,int aNbUnknown,int aNbObservation);
        /// Destructor will free all allocated formulas
        virtual ~cCoordinatorF();
        /// Copies are not allowed on this kind of object.
        cCoordinatorF(const cCoordinatorF<TypeElem> &) = delete;

        // --------------------------- Accessors to Atomic Formulas -------------------
        const std::vector<tFormula>& VUk()  const {return mVFormUnknowns;}     ///< Unknowns
        const std::vector<tFormula>& VObs() const {return mVFormObservations;} ///< Observations

        // --------------------------- Manipulation -------------------
        /// Set the formulas that will be used for computation
        void SetCurFormulas(const std::vector<tFormula> &);
        /** SetCurFormulas + all its derivative , order of storage will be
            VF0 dVF0/dX0 dVF0/dX1 .... VF1 dVF1/dX0 ... */
        void SetCurFormulasWithDerivative(const std::vector<tFormula> & aVF);

        // ---------- Code generator ---------------
        /** Generate code, class cName , file cName.h, cName.cpp. Return filename w/o ext, or "" if error */
        std::pair<std::string,std::string> GenerateCode(const std::string &aFilePrefix="CodeGen_") const
        {  return GenCodeShortExpr(aFilePrefix);
        }
        std::pair<std::string,std::string> GenerateCodeTemplate(const std::string &aFilePrefix="CodeGen_") const
        {  return GenCodeShortExprTemplate(aFilePrefix);
        }
        std::pair<std::string,std::string> GenerateCodeForType(const std::string& aTypeName, const std::string &aFilePrefix="CodeGen_") const
        {  return GenCodeShortExprForType(aTypeName,aFilePrefix);
        }
        std::pair<std::string,std::string> GenCodeShortExpr(const std::string &aFilePrefix="CodeGen_") const
        {
            return GenCodeCommon(aFilePrefix, "", true);
        }
        std::pair<std::string,std::string> GenCodeLonExpr(const std::string &aFilePrefix="CodeGen_") const
        {
            return GenCodeCommon(aFilePrefix, "", false);
        }
        std::pair<std::string,std::string> GenCodeShortExprTemplate(const std::string &aFilePrefix="CodeGen_") const
        {
            return GenCodeCommon(aFilePrefix, "template<>", true);
        }
        std::pair<std::string,std::string> GenCodeLonExprTemplate(const std::string &aFilePrefix="CodeGen_") const
        {
            return GenCodeCommon(aFilePrefix, "template<>", false);
        }
        std::pair<std::string,std::string> GenCodeShortExprForType(const std::string& aTypeName, const std::string &aFilePrefix="CodeGen_") const
        {
            return GenCodeCommon(aFilePrefix, aTypeName, true);
        }
        std::pair<std::string,std::string> GenCodeLonExprForType(const std::string& aTypeName, const std::string &aFilePrefix="CodeGen_") const
        {
            return GenCodeCommon(aFilePrefix, aTypeName, false);
        }

        // =========== Parametrisation of the generated code =========
        /// The default value is not always adequate "SymbDer/SymbDer_Common.h"
        void SetHeaderIncludeSymbDer(const std::string &aH) {mHeaderIncludeSymbDer= aH;}
        void SetDirGenCode(const std::string &aDir) {mDirGenCode= aDir;}

    private : // END-USER
    /* =================================================================================
       ABOVE WAS THE REAL PUBLIC PART OF cCoordinatorF FOR USER OF LIBRARY. THE REST
       IS PUBLIC FOR IMPLEMENTERS BUT NOT NEEDED BY USER
    =====================================================================================*/
    public :
        // Result of several evaluation are stored in a buffer, Eigen vector are used
        // as they implement efficiently arithmetical operation
        // typedef Eigen::Array<TypeElem, 1, Eigen::Dynamic> tBuf;
        typedef std::vector<TypeElem> tBuf;

        // --------------------------- Access to function from names, values -------------------
        /// Indicate if the formula corresponding to a given string already exists
        inline bool ExistFunc(const std::string & aName) const
        {
            return (mDicoFunc.find(aName) != mDicoFunc.end());
        }
        /// Func of given name, Error if it doesn't exist
        inline tFormula FuncOfName(const std::string & aName) const ;
        /// Add a function (put it in dico), Error if it already exists
        inline void AddFormula(tFormula aPF)
        {
            if (ExistFunc(aPF->Name())) InternalError ("Multiple add of identic name :[" + aPF->Name() + "]",this->Name());
            mDicoFunc[aPF->Name()] = aPF;
            mVAllFormula.push_back(aPF);
            aPF->TryReducAssoc();  // a new node may close a maximal +/* chain : attempt the reduction now
        }
        /// Func of given constant, create if it doesn't exist
        inline tFormula CsteOfVal(const TypeElem & aCste) ;
        tFormula Cste0() const {return mCste0;} ///< Access to the cached constant 0
        tFormula Cste1() const {return mCste1;} ///< Access to the cached constant 1
        tFormula Cste2() const {return mCste2;} ///< Access to the cached constant 2
        /// Tuning --- Print the stack of function as a tree
        inline void ShowStackFunc() const;
        /// Formulas used for computation (every formula needed to compute mVCurF)
        const std::vector<tFormula>& VReached() const {return mVReachedF;}
        // Current (top) formulas
        const std::vector<tFormula>& VCurrent() const {return mVCurF;}

        size_t NbCurFonc() const {return mVAllFormula.size();}
    private :
        /// Called by cCalculator::PushNewEvals to Set Unknown/Observations
        virtual void SetNewUks(const std::vector<TypeElem> &aVUks) override;
        virtual void SetNewObs(const std::vector<TypeElem> &aVObs) override;
        /** Make the evaluation of current functions on pushed values */
        virtual void DoEval() override;
        /// Used to generate automatically Id for Unknown/Observation, when we dont need to control them explicitely
        static std::vector<std::string> MakeAutomId(const std::string & aPrefix,int aNb);

        std::pair<std::string,std::string> GenCodeCommon(const std::string &aPrefix, std::string aTypeName, bool isShortExpr) const;
        std::string TypeElemName() const;

        size_t                          mNbCste;             ///< Number Cste
        std::vector<tFormula>           mVFormUnknowns;      ///< Vector of All Unknowns
        std::vector<tFormula>           mVFormObservations;  ///< Vector of All Observations
        std::map<std::string,tFormula>  mDicoFunc;           ///< Map Name => Func
        std::vector<tFormula>           mVAllFormula;        ///< Vector of All Func, allow to parse them in creation order
        std::map<TypeElem,tFormula>     mDicoCste;           ///< Map Value => Func Constant
        tFormula                        mCste0;              ///< Fonc constant null
        tFormula                        mCste1;              ///< Fonc constant 1
        tFormula                        mCste2;              ///< Fonc constant 2
        std::vector<tFormula>           mVCurF;              ///< Current evaluated formulas
        std::vector<tFormula>           mVReachedF;          ///< Formulas "reachable" i.e. necessary to compute mVCurF
        std::string                     mHeaderIncludeSymbDer; ///< Compilation environment may want to change it
        std::string                     mDirGenCode;         ///< Want to put generated code in a fixed folder ?
};
/* **************************************************
* *
* Pre-Declaration of all classes *
* Not required by compilation *
* (Except for cImplemF )but I like to have *
* a quick view of all existing classes *
* *
* **************************************************/
/** "Mother" Interface class of all classes implementing the service ,
abstract class with pure virtual method
*/
template <class TypeElem> class cImplemF ;
// --------------- "Atomic" function : Unknown, constant, observation-----------------
template <class TypeElem> class cAtomicF ; ///< Mother Class of all atomic formulas
/// "Observations" corresponding to user constant (change for each evaluation)
template <class TypeElem> class cObservationF ;
/// "Constant" function
template <class TypeElem> class cConstantF ;
/// "Unknown" for representing coordinates function X0,X1,X2 ....
template <class TypeElem> class cUnknownF;
// ----------------------------- Unary operator ------------------------------------
template <class TypeElem> class cUnaryF ; ///< Mother Class of all unary operator
template <class TypeElem> class cSquareF ; ///< Class for square operator
template <class TypeElem> class cExpF ; ///< Class for exponential operator
template <class TypeElem> class cMin1F ; ///< Class for Unary Minus
template <class TypeElem> class cLogF ; ///< Class for neperien log
// -------------------------------- Binary operator -------------------------------------
template <class TypeElem> class cBinaryF ; ///< Mother class of binary operators
template <class TypeElem> class cSumF ; ///< Class for sum of 2 functions
template <class TypeElem> class cMulF ; ///< Class for multiplication of 2 functions
template <class TypeElem> class cSubF ; ///< Class for substraction of 2 functions
template <class TypeElem> class cDivF ; ///< Class for division of 2 functions
template <class TypeElem> class cPowF ; ///< Class for power (F1 ^ F2) of 2 functions
/* *************************************************** */
/* *************************************************** */
/* * * */
/* * Definition of all classes * */
/* * * */
/* *************************************************** */
/* *************************************************** */
// ------------------- 2 "Main" Classes -------------------------
// cFormula / cImplemF
// ----------------------------------------------------------------
/** Mother class of all concrete formulas.  Pure abstract : each operator
    (atoms, unary, binary) derives from it and implements Derivate() and
    ComputeBuf().  Instances are owned by the coordinator. */
template <class TypeElem> class cImplemF : public SYMBDER_cMemCheck
{
    public :
        // See eigen documentation, this macro is mandatory for alignment reason
        // EIGEN_MAKE_ALIGNED_OPERATOR_NEW
        EIGEN_ALLIGNMENT_IN_MMVII

        typedef TypeElem                    tElem;
        typedef cCoordinatorF<TypeElem>     tCoordF;
        typedef typename tCoordF::tBuf      tBuf;
        typedef typename tCoordF::tFormula  tFormula;

        //----------- For derivation and reduction--------------
        virtual bool IsCste(const TypeElem &) const {return false;} ///< To redefine in constant func, Used for simplification in "/ * + -"
        virtual bool IsDistribInt() const {return false;}           ///< To redefine in *,/ for distributivity
        virtual tFormula Derivate(int aK) const = 0;                ///< Compute the formula of it's derivative to Kth unknown
        /** In this function we try to make reduction using associativity (and maybe others),
            as we want to do it only on maximal chains of + (or *) this has to be run by the father of
            the chain
        */
        void TryReducAssoc();
        virtual cImplemF<TypeElem> * ReducAssoc() {return this;}  ///< Identity by default; overridden by operators that reduce
        virtual bool IsMult() const {return false;}               ///< Is this node a multiplication ?
        virtual bool IsSum() const {return false;}                ///< Is this node a sum ?
        bool ReducAssocTried() const {return mReducAssocTried;}   ///< Standard accessor
        virtual cFormula<TypeElem> VOper2(const tFormula &,const tFormula &) const; ///< Used in distributive reduction to recall the binary operator if suitable

        // -------------- For Computation -------------------------
        /// Method that will compute data inside mBuf
        virtual void ComputeBuf(int aK0,int aK1) =0;
        /// Return "Sub"-formula referenced
        virtual std::vector<tFormula> Ref() const =0;

        // ---------- Accessors ---------------
        const std::string & Name() const {return mName;}  ///< Standard accessor
        tCoordF * CoordF() const {return mCoordF;}        ///< Standard accessor
        int NumGlob() const {return mNumGlob;}            ///< Standard accessor

        // ---------- Access to Buf data ---------------
        void SetBuf(size_t anIndex,const TypeElem & aVal) {mBuf.at(anIndex) = aVal;}
        const TypeElem & GetBuf(size_t anIndex) {return mBuf.at(anIndex);}
        TypeElem * DataBuf() {return mDataBuf;}

        // ---------- Reached Flag ---------------
        bool Reached() const {return mReached;}                 ///< Standard accessor
        void SetReached(bool IsReached) {mReached = IsReached;} ///< Fix Reached
        /// Compute in the reference graph and put formulas explored in VReached
        void CalcRecursiveDepth(std::vector<tFormula> & VReached) ;
        int Depth() const {return mDepth;}           ///< Standard accessor
        void SetDepth(int aDepth) {mDepth = aDepth;} ///< Fix Depth

        // ---------- Code gen -----------------------
        virtual bool isAtomic() const { return false;}
        virtual std::string GenCodeFormName() const {return NameGlob();} // Name of formula, referenced value for Atomic
        virtual std::string GenCodeShortExpr() const = 0;  // N-Addresses code generation
        virtual std::string GenCodeDef() const = 0;        // Formula definition generation
        virtual std::string GenCodeRef() const;            // Formula reference generation
        int UsedCnt() const {return mUsedCnt;}             ///< Standard accessor

        // ---------- Tuning / Debugging / Analysing ---------------
        /// Used to print constant from generic formula
        virtual const TypeElem * ValCste() const {return nullptr;}
        /// Infixed "Pretty" Print . For tuning and checking (i.e correction of reduction, derivative, rewrite ...)
        virtual std::string InfixPPrint() const =0;
        /// Number of reference that would occur without reduction on identic formula (to test performance in paper)
        int RecursiveRec() const;
        // Every where a reference name is needed
        std::string NameGlob() const { return "F" + std::to_string(NumGlob());}
        /// Access at global level is 4 reducing, also it is used 4 implement in Unary & Binary
        virtual const std::string & NameOperator() const = 0;

        // -------------------- Destructor / Constructor --------------------------
        virtual ~cImplemF () {} ///< Add a virtual ~X() when we have virtual methods, who knows ...
    protected :
        inline cImplemF (tCoordF * aCoordF,const std::string & aName) :
            mCoordF (aCoordF),
            mBuf (mCoordF->SzBuf(),TypeElem(0.0)),
            mDataBuf (mBuf.data()),
            mName (aName),
            mNumGlob (mCoordF->NbCurFonc()),
            mReached (false),
            mDepth (-1),
            mUsedCnt (0),
            mReducAssocTried (false)
        {
        }

        tCoordF *         mCoordF;  ///< Coordinator that manages all the cooperating functions
        tBuf              mBuf;     ///< Buf to store values
        TypeElem *        mDataBuf; ///< Raw pointer on mBuf's data
        const std::string mName;    ///< string representation of the formula as for ex : C2, X1, V0 , square F3, F18/F3 ...
        int               mNumGlob; ///< Global number (!= Num in class)
        bool              mReached; ///< Flag to know if a formula is useful to compute the current ones
        int               mDepth;   ///< Used for topological sort
    private :
        cImplemF (const cImplemF<TypeElem> &) = delete; ///< No Copy
        unsigned mUsedCnt;          ///< Number of references found during CalcRecursiveDepth
        bool     mReducAssocTried;  ///< Has the associativity reduction already been attempted ?
};
/** Thin handle over cImplemF : behaves like a pointer (operator->) and is the
    value type users manipulate.  Memory is owned by the coordinator, hence
    no smart pointer and an explicit FreeMem(). */
template <class TypeElem> class cFormula
{
    public :
        typedef cCoordinatorF<TypeElem>     tCoordF;
        typedef cImplemF<TypeElem>          tImplemF;
        typedef typename tCoordF::tFormula  tFormula;

        // -------------------- constructor -------------------
        /// Construct from a pointer, standard
        cFormula (tImplemF * aRawPtr) :
            mPtr (aRawPtr)
        {
        }
        /// Default constructor, required by some code (vector ?)
        cFormula ():
            cFormula <TypeElem> (nullptr)
        {
        }
        // --------------- operator on pointer ---------------------
        // UNUSED 4 NOW tImplemF & operator*() const {return *mPtr;} ///< Standard behaviour of a pointer
        tImplemF * operator->() const {return mPtr;} ///< Standard behaviour of a pointer
        tImplemF * RawPtr() const {return mPtr;}     ///< Explicit access
        // DO NOT WORK const std::unique_ptr<tImplemF> operator->() const {return std::unique_ptr<mPtr>;}
        bool IsNull() const {return mPtr==nullptr;}  ///< Safer than giving access to raw pointer

        // --------------- Naming ---------------------
        /// Generate the unique identifier of a binary expression
        std::string NameFormulaBin(const std::string & aNameOper,const tFormula & aF2) const
        {
            return (*this)->NameGlob() + aNameOper + aF2->NameGlob();
        }
        /// Generate the unique identifier of a unary expression
        std::string NameFormulaUn(const std::string & aNameOper) const
        {
            return aNameOper + " " + (*this)->NameGlob();
        }
        /// To allow destruction without giving access to raw pointer
        void FreeMem() {delete mPtr; mPtr=nullptr;}
    private :
        tImplemF* mPtr; ///< Faster than shared_ptr, and deallocation is easy as objects are controlled by the coordinator
};
/* *************************************************** */
/* *************************************************** */
/* * * */
/* * ATOMIC FORMULA * */
/* * * */
/* *************************************************** */
/* *************************************************** */
/* ----------------------------------------------------------
Class for atomic formula
MOTHER CLASS : cAtomicF
DERIVED : cUnknownF / cObservationF / cConstantF
----------------------------------------------------------------*/
/** Mother class of atomic formulas (unknowns, observations, constants) :
    leaves of the expression graph, with no sub-formula and nothing to
    compute at evaluation time. */
template <class TypeElem> class cAtomicF : public cImplemF<TypeElem>
{
    public :
        typedef cImplemF<TypeElem>          tImplemF;
        typedef typename tImplemF::tCoordF  tCoordF;
        typedef typename tCoordF::tFormula  tFormula;

        /// Should work always
        std::string InfixPPrint() const override {return tImplemF::Name();}
        /// Rule deriv=0 , work by default (constant and observations)
        tFormula Derivate(int aK) const override {return tImplemF::mCoordF->Cste0();}
        /// Generally nothing to do in atomic, their buffer has been filled with adequate values
        void ComputeBuf(int aK0,int aK1) override { }
        /// Atoms reference no sub-formula
        std::vector<tFormula> Ref() const override{return std::vector<tFormula>();}
    protected :
        bool isAtomic() const override { return true;}
        std::string GenCodeFormName() const override { return this->Name();}    // atoms are referenced by their own name
        std::string GenCodeShortExpr() const override { return this->GenCodeFormName();}
        std::string GenCodeRef() const override { return this->GenCodeFormName();}
        std::string GenCodeDef() const override { return mCodeValue;}

        inline cAtomicF(tCoordF * aCoordF,const std::string& aName) :
            tImplemF (aCoordF,aName)
        { }
        std::string mCodeValue; ///< C++ expression the generated code uses to read this atom's value
};
/** Atomic formula for an unknown Xk : the only atom whose derivative is
    non-zero (dXi/dXj = delta(i,j)). */
template <class TypeElem> class cUnknownF : public cAtomicF<TypeElem>
{
    public :
        typedef cAtomicF<TypeElem>          tAtom;
        typedef typename tAtom::tImplemF    tImplemF;
        typedef typename tImplemF::tCoordF  tCoordF;
        typedef typename tCoordF::tFormula  tFormula;

        const std::string & NameOperator() const override {static std::string s("UK"); return s;}
        std::string InfixPPrint() const override {return tImplemF::Name();}
        /// rule : dXi/dXj = delta(i,j)
        tFormula Derivate(int aK) const override
        {
            return (aK==mNumUnk) ? tImplemF::mCoordF->Cste1() : tImplemF::mCoordF->Cste0();
        }
        friend tCoordF;  // only the coordinator creates unknowns
    private :
        inline cUnknownF(tCoordF * aCoordF,const std::string& aName,int aNum) :
            tAtom (aCoordF,aName),
            mNumUnk (aNum)
        {
            // Generated code reads the unknown straight from the input vector
            this->mCodeValue = "this->mVUk[aK][" + std::to_string(mNumUnk) + "]";
        }
        int mNumUnk; ///< Number of the Unknown; like : 0 for X0, 1 for X1 ...
};
/** Atomic formula for an observation Vk : a user-supplied value that changes
    at each evaluation but is constant w.r.t. the unknowns (derivative 0,
    inherited from cAtomicF). */
template <class TypeElem> class cObservationF : public cAtomicF<TypeElem>
{
    public :
        typedef cAtomicF<TypeElem>          tAtom;
        typedef typename tAtom::tImplemF    tImplemF;
        typedef typename tImplemF::tCoordF  tCoordF;
        typedef typename tCoordF::tFormula  tFormula;
        friend tCoordF;  // only the coordinator creates observations

        const std::string & NameOperator() const override {static std::string s("Obs"); return s;}
    private :
        inline cObservationF(tCoordF * aCoordF,const std::string & aName,int aNum) :
            tAtom (aCoordF,aName),
            mNum (aNum)
        {
            // Generated code reads the observation straight from the input vector
            this->mCodeValue = "this->mVObs[aK][" + std::to_string(mNum) + "]";
        }
        int mNum; ///< Number of the Observation; like : 0 for V0, 1 for V1 ...
};
/** Atomic formula for a literal constant : its buffer is filled once with
    the value, and the value is embedded verbatim in generated code. */
template <class TypeElem> class cConstantF : public cAtomicF<TypeElem>
{
    public :
        typedef cAtomicF<TypeElem>          tAtom;
        typedef typename tAtom::tImplemF    tImplemF;
        typedef typename tImplemF::tCoordF  tCoordF;
        typedef typename tCoordF::tFormula  tFormula;
        typedef typename tCoordF::tBuf      tBuf;
        friend tCoordF;  // only the coordinator creates constants (via CsteOfVal)

        bool IsCste(const TypeElem &K) const override {return mVal==K;} ///< Here we know if we are a constant of value K
        const TypeElem * ValCste() const override {return &mVal;}
        const std::string & NameOperator() const override {static std::string s("Cste"); return s;}
    protected :
        inline cConstantF(tCoordF * aCoordF,const std::string & aName,int aNum,const TypeElem& aVal) :
            tAtom (aCoordF,aName),
            mNum (aNum),
            mVal (aVal)
        {
            for (auto & aV : tImplemF::mBuf) aV = aVal; // Initialize buf with const val
            std::stringstream ss;
            // Precision that ensures that Num0 -> ASCII -> Num1 => Num1 == Num0
            // May cause some odd but correct value for non exactly representable numbers
            ss << std::setprecision(std::numeric_limits<decltype(mVal)>::max_digits10) << mVal;
            this->mCodeValue = ss.str();
        }
        std::string GenCodeFormName() const override { return this->mCodeValue;}

        int mNum;             ///< Creation number of this constant
        const TypeElem mVal;  ///< The constant's value
};
/* *************************************************** */
/* *************************************************** */
/* * * */
/* * cFormula / cImplemF / cCoordinatorF * */
/* * External Definition of methods * */
/* * * */
/* *************************************************** */
/* *************************************************** */
/* ---------------------- */
/* cFormula */
/* ---------------------- */
/// Convenience helper: build (or fetch the cached) constant formula of value
/// aV inside the coordinator that owns the existing formula aF.
template <class T> cFormula<T> CreateCste(const T & aV,const cFormula<T> & aF)
{
    // Any formula knows its coordinator; the coordinator caches constants by value.
    auto * aCoordinator = aF->CoordF();
    return aCoordinator->CsteOfVal(aV);
}
/* ---------------------- */
/* cImplemF */
/* ---------------------- */
/// Count the nodes of the formula tree rooted here (this node included).
/// Shared sub-formulas are counted once per reference, i.e. this is the size
/// of the fully-expanded expression tree, not of the DAG.
template <class TypeElem> int cImplemF<TypeElem>::RecursiveRec() const
{
    int aCount = 1; // this node itself
    for (const auto & aSub : Ref())
        aCount += aSub->RecursiveRec();
    return aCount;
}
/// Depth-first traversal computing, for every node reachable from this one:
///  - mDepth   : 1 + max depth of referenced sub-formulas (atoms get depth 0);
///  - mUsedCnt : how many times the node is referenced during the traversal;
///  - aVReached: every node, appended exactly once, in post-order.
/// Precondition: mDepth was reset to -1 on all nodes (see SetCurFormulas),
/// which is what marks a node as "not yet visited".
template <class TypeElem> void cImplemF<TypeElem>::CalcRecursiveDepth(std::vector<tFormula> & aVReached)
{
if (mDepth != -1) {
mUsedCnt++;
return; // if we were already here , nothing to do
}
mUsedCnt = 1;
for (const auto & aF : Ref())
{
aF->CalcRecursiveDepth(aVReached); // parse sub formula
mDepth = std::max(mDepth,aF->mDepth); // Memo max depth
}
mDepth++; // my depth is 1 + max of depth of my referenced formulas
// Post-order push: children are always registered before their parent,
// which is what makes the later depth sort a valid topological order.
aVReached.push_back(tFormula(this));
}
/// Try the associativity reduction on the direct sub-formulas of this node.
/// A child using the SAME operator as this node is deliberately left alone:
/// in a configuration like (A+B)+..., the chain may still grow, so the
/// reduction is deferred to the topmost node of the chain.
template <class TypeElem> void cImplemF<TypeElem>::TryReducAssoc()
{
for (auto & aF : Ref())
{
// F will not belong to the terminal command that will have to reparsed
// If we are in the config (A+B) + .. maybe the chain will grow later
if (aF->NameOperator() != NameOperator())
{
aF = aF->ReducAssoc();
}
// Mark the child as processed either way, so SetCurFormulas will not retry it.
aF->mReducAssocTried = true;
}
}
/// Default implementation of the virtual binary operation: only genuine
/// binary-operator nodes override this, so reaching it is an internal error.
/// The return of aF1 is a formality — InternalError is expected to abort
/// (NOTE(review): confirm InternalError never returns).
template <class TypeElem> cFormula<TypeElem> cImplemF<TypeElem>::VOper2(const tFormula & aF1,const tFormula &) const
{
InternalError("Incorrect virtual binary operation",this->mCoordF->Name());
return aF1;
}
/// Emit the C++ text by which OTHER generated expressions refer to this
/// formula: a formula used exactly once is inlined as its full defining
/// expression, while a shared formula is referred to by the name of the
/// local variable that was emitted for it.
template <class TypeElem>
std::string cImplemF<TypeElem>::GenCodeRef() const
{
    return (UsedCnt() == 1) ? GenCodeDef() : GenCodeFormName();
}
/* ---------------------- */
/* cCoordinatorF */
/* ---------------------- */
/// Build the automatic identifier list {aPrefix+"0", ..., aPrefix+(aNb-1)},
/// used to name unknowns ("X0","X1",...) and observations ("V0","V1",...).
template <class TypeElem>
std::vector<std::string> cCoordinatorF<TypeElem>::MakeAutomId(const std::string & aPrefix,int aNb)
{
    std::vector<std::string> aNames;
    if (aNb > 0)
        aNames.reserve(static_cast<size_t>(aNb));
    for (int aNum = 0; aNum < aNb; ++aNum)
        aNames.push_back(aPrefix + std::to_string(aNum));
    return aNames;
}
/// Main constructor: registers the three ubiquitous constants (0, 1, 2) and
/// creates one atomic formula per unknown and per observation, with the
/// caller-supplied names. The member-init order matters: mNbCste must be
/// initialized before the CsteOfVal calls that increment it.
template <class TypeElem>
cCoordinatorF<TypeElem>::cCoordinatorF
(
const std::string & aName,
int aSzBuf,
const std::vector<std::string> & aVNameUK,
const std::vector<std::string> & aVNameObs
) :
cCalculator<TypeElem>(aName,aSzBuf,aVNameUK,aVNameObs),
mNbCste (0),
mCste0 (CsteOfVal(0.0)),
mCste1 (CsteOfVal(1.0)),
mCste2 (CsteOfVal(2.0)),
mHeaderIncludeSymbDer ("SymbDer/SymbDer_Common.h"),
mDirGenCode ("")
{
// Generate all the function corresponding to unknown
for (size_t aNumUK=0 ; aNumUK<this->mNbUK ; aNumUK++)
{
tFormula aFuncUK(new cUnknownF<TypeElem>(this,aVNameUK[aNumUK],aNumUK)); // Create it
mVFormUnknowns.push_back(aFuncUK); // Push it in vector of coordinat func
AddFormula(aFuncUK); // Add to all func
}
// Generate all the function corresponding to observations
for (size_t aNumObs=0 ; aNumObs<this->mNbObs ; aNumObs++)
{
tFormula aFuncObs(new cObservationF<TypeElem>(this,aVNameObs[aNumObs],aNumObs)); // Create it
mVFormObservations.push_back(aFuncObs); // Push it in vector of coordinat func
AddFormula(aFuncObs); // Add to all func
}
}
/// Convenience constructor: names are generated automatically as
/// X0..X(aNbUK-1) for unknowns and V0..V(aNbObs-1) for observations.
template <class TypeElem>
cCoordinatorF<TypeElem>::cCoordinatorF(const std::string &aName, int aSzBuf, int aNbUK, int aNbObs) :
cCoordinatorF<TypeElem>(aName,aSzBuf,MakeAutomId("X",aNbUK),MakeAutomId("V",aNbObs))
{
}
/// Destructor: explicitly releases every formula ever registered.
/// FreeMem is used (rather than relying on tFormula's own lifetime) —
/// presumably to break reference cycles in the formula DAG; confirm
/// against cFormula::FreeMem before changing.
template <class TypeElem>
cCoordinatorF<TypeElem>::~cCoordinatorF()
{
for (auto & aForm : mVAllFormula)
{
aForm.FreeMem();
}
}
/// Return the (unique) constant formula of value aCste, creating and
/// registering it on first request. Constants are interned in mDicoCste,
/// so repeated calls with the same value return the same node.
template <class TypeElem>
cFormula<TypeElem> cCoordinatorF<TypeElem>::CsteOfVal(const TypeElem & aCste)
{
tFormula & aRef = mDicoCste[aCste];
if (aRef.IsNull()) // If it was not existing, the map contain now the def element
{
// Constants are auto-named "_C<n>" by creation rank (n = mNbCste).
aRef=tFormula(new cConstantF<TypeElem>(this,"_C"+std::to_string(mNbCste),mNbCste,aCste));
mNbCste++;
AddFormula(aRef);
}
return aRef;
}
/// Retrieve the formula registered under aName. Asking for an unregistered
/// name is a fatal internal error (InternalError is assumed to abort;
/// the trailing return only keeps the compiler satisfied).
template <class TypeElem>
cFormula <TypeElem> cCoordinatorF<TypeElem>::FuncOfName(const std::string & aName) const
{
    auto anIter = mDicoFunc.find(aName);
    if (anIter != mDicoFunc.end())
        return anIter->second;
    InternalError ("Try to acces non existing name :[" + aName + "]",this->Name());
    return anIter->second;
}
/// Store one new sample of unknown values: each value is written into the
/// buffer of the corresponding unknown-formula at slot mNbInBuf (aVUks is
/// expected to have exactly one entry per unknown).
template <class TypeElem>
void cCoordinatorF<TypeElem>::SetNewUks(const std::vector<TypeElem> & aVUks)
{
    size_t anIndex = 0;
    for (const TypeElem & aVal : aVUks)
    {
        mVFormUnknowns[anIndex]->SetBuf(this->mNbInBuf,aVal);
        ++anIndex;
    }
}
/// Store one new sample of observation values: each value is written into
/// the buffer of the corresponding observation-formula at slot mNbInBuf
/// (aVObs is expected to have exactly one entry per observation).
template <class TypeElem>
void cCoordinatorF<TypeElem>::SetNewObs(const std::vector<TypeElem> & aVObs)
{
    size_t anIndex = 0;
    for (const TypeElem & aVal : aVObs)
    {
        mVFormObservations[anIndex]->SetBuf(this->mNbInBuf,aVal);
        ++anIndex;
    }
}
/// Select the current formulas AND their first derivatives: for each input
/// formula F, the evaluated set becomes [F, dF/dX0, ..., dF/dX(n-1)], i.e.
/// groups of (1 + NbUK) values per input formula. Must set the mWithDer /
/// mSzInterval bookkeeping AFTER SetCurFormulas, which resets them.
template <class TypeElem>
void cCoordinatorF<TypeElem>::SetCurFormulasWithDerivative(const std::vector<tFormula> & aVF)
{
std::vector<tFormula> aVWDer;
for (const auto & aF : aVF)
{
aVWDer.push_back(aF);
for (size_t aUK=0 ; aUK<this->mNbUK ; aUK++)
{
aVWDer.push_back(aF->Derivate(aUK));
}
}
SetCurFormulas(aVWDer);
this->mWithDer = true;
this->mSzInterval = 1+this->mNbUK;
this->mNbElem = aVF.size();
}
/// Select the set of formulas to evaluate. Steps:
///  1. apply the associativity reduction to formulas not yet processed;
///  2. reset the depth marker (-1) on every node reached last time, so the
///     new traversal starts clean;
///  3. recompute depth / used-count and collect all reachable nodes;
///  4. sort reached nodes by depth — a topological order guaranteeing each
///     node is computed after its sub-formulas in DoEval;
///  5. resize the per-line result buffers to the new formula count.
template <class TypeElem>
void cCoordinatorF<TypeElem>::SetCurFormulas(const std::vector<tFormula> & aVF0)
{
std::vector<tFormula> aVF;
for(auto aF : aVF0)
{
if (! aF->ReducAssocTried())
{
aF = tFormula(aF->ReducAssoc());
// std::cout << "GGGGGGGG " << aF->Name() << " \n";
}
aVF.push_back(aF);
}
// Plain (no-derivative) bookkeeping; SetCurFormulasWithDerivative overrides it.
this->mWithDer=false;
this->mSzInterval = 1;
this->mNbElem = aVF0.size();
mVCurF = aVF;
// Erase previous
for (auto & aF : mVReachedF)
aF->SetDepth(-1);
mVReachedF.clear();
// Compute depth for topologicall sort
for (auto & aF : mVCurF)
{
aF->CalcRecursiveDepth(mVReachedF);
}
// Use depth to have topological sort
// In fact it is probably not necessary to make this sort, initial order of reaching order
// should work; by the way : no dammage ..
std::sort
(
mVReachedF.begin(),
mVReachedF.end(),
[](const tFormula & aF1,const tFormula &aF2) {return aF1->Depth() < aF2->Depth();}
);
// Make Buf of Res to have right size
for (auto & aLine : this->mBufLineRes)
{
aLine.resize(mVCurF.size());
}
}
/// Evaluate every reached formula on the buffered input samples, then copy
/// the values of the current formulas into the per-line result buffers.
/// With OpenMP the buffer rows [0, mNbInBuf) are split contiguously across
/// threads; every thread walks the SAME topologically-sorted formula list,
/// so the dependency order is respected within each row range.
template <class TypeElem>
void cCoordinatorF<TypeElem>::DoEval()
{
// Make the real hard stuff, compute the data, the depedancy ordering should make it coherent
#ifdef _OPENMP
#pragma omp parallel
{
size_t thread_num = omp_get_thread_num();
size_t num_threads = omp_get_num_threads();
// Contiguous [start,end) slice of buffer rows for this thread.
size_t start = thread_num * this->mNbInBuf / num_threads;
size_t end = (thread_num + 1) * this->mNbInBuf / num_threads;
if (end>start)
{
for (auto & aF : mVReachedF)
{
aF->ComputeBuf(start,end);
}
}
}
#else
for (auto & aF : mVReachedF)
{
aF->ComputeBuf(0,this->mNbInBuf);
}
#endif
// Gather results: one line per buffered sample, one slot per current formula.
for (size_t aKLine=0 ; aKLine<this->mNbInBuf ; aKLine++)
{
std::vector<TypeElem> & aLine = this->mBufLineRes[aKLine];
for (size_t aKFunc=0 ; aKFunc< mVCurF.size() ; aKFunc++)
aLine[aKFunc] = mVCurF[aKFunc]->GetBuf(aKLine);
}
}
/// Debug dump of the formula stack: for every registered formula print its
/// depth (--- when unreached, i.e. depth is still -1), its used-count, its
/// global and local names, and its value when it is a constant; then list
/// the global ids of the reached set and of the current formula set.
template <class TypeElem>
void cCoordinatorF<TypeElem>::ShowStackFunc() const
{
for (const auto & aForm : mVAllFormula)
{
if (aForm->Depth()==-1)
std::cout << "---" ;
else
std::cout << "-" << aForm->Depth() << "-";
std::cout << aForm->UsedCnt() << "- ";
std::cout << aForm->NameGlob() << " => " << aForm->Name();
const TypeElem * aPV = aForm->ValCste();
if (aPV)
std::cout << " ; Val=" << *aPV;
std::cout << "\n";
}
std::cout << "REACHED ";
for (const auto & aForm : mVReachedF)
{
std::cout << aForm->NumGlob() << " ";
}
std::cout << "\n";
std::cout << "CUR ";
for (const auto & aForm : mVCurF)
{
std::cout << aForm->NumGlob() << " ";
}
std::cout << "\n";
}
// return
inline std::string VStr2CPP(const std::vector<std::string> & aVS)
{
std::string aRes = "{";
for (size_t aK=0 ; aK<aVS.size() ; aK++)
{
if (aK!=0)
aRes = aRes + "," ;
aRes = aRes + "\""+ aVS[aK] + "\"" ;
}
return aRes+ "}";
}
/// Generate the C++ source for a compiled calculator equivalent to the
/// current formula set.
/// @param aPrefix     prepended to the generated file name
/// @param aTypeName   element type name to emit; "" means TypeElemName();
///                    the special value "template<>" generates a templated class
/// @param isShortExpr true: every non-atomic formula gets its own local
///                    variable; false ("LongExpr"): single-use formulas are
///                    inlined into their users (see GenCodeRef)
/// @return (class name, file name) on success, a pair of empty strings when
///         a file could not be opened.
/// Non-templated code goes into a .h + .cpp pair; templated code is
/// header-only. Every emitted string below is part of the generated source —
/// do not "fix" its spacing.
template <class TypeElem>
std::pair<std::string,std::string> cCoordinatorF<TypeElem>::GenCodeCommon(const std::string& aPrefix, std::string aTypeName, bool isShortExpr) const
{
std::string aName = this->Name();
if (aName.size() == 0)
UserSError("Formula name is empty.",this->Name());
for (auto &c : aName) {
if (!std::isalnum(c) && c != '_')
UserSError("Formula name is not a valid C++ identifier: '_,a..z,A..Z,0..9' only.",this->Name());
}
std::string aClassName = "c" + aName;
if (aTypeName.size()==0)
aTypeName = this->TypeElemName();
bool isTemplated = aTypeName=="template<>";
if (isTemplated)
aTypeName = "TypeElem";
// NOTE(review): aVectorName is computed but never used below.
std::string aVectorName = "std::vector<" + aTypeName + ">";
if (! isShortExpr)
aClassName = aClassName + "LongExpr";
std::string aParentClass = "cCompiledCalculator<" + aTypeName + ">";
std::string aFileName = aPrefix + aClassName;
std::ofstream aOs(mDirGenCode + aFileName + ".h");
if (!aOs)
return std::make_pair(std::string(),std::string());
// --- Header: class declaration ---
aOs << "#ifdef _OPENMP\n"
"#include <omp.h>\n"
"#endif\n"
"#include \"" << mHeaderIncludeSymbDer << "\"\n"
"\n"
"namespace NS_SymbolicDerivative {\n\n";
if (isTemplated) {
aOs << "template<typename TypeElem>\n";
}
aOs << "class " << aClassName << " : public " << aParentClass << "\n"
"{\n"
"public:\n"
" typedef " << aParentClass << " Super;\n"
" " << aClassName << "(size_t aSzBuf) : \n";
aOs
<< " Super(\n"
<< " \"" << aName << "\",\n"
<< " " << " aSzBuf,//SzBuf\n"
<< " " << this->mNbElem << ",//NbElement\n"
<< " " << mVCurF.size() << ",//SzOfLine\n"
<< " " << VStr2CPP(this->NamesUk()) << ",// Name Unknowns\n"
<< " " << VStr2CPP(this->NamesObs()) << ",// Name Observations\n"
<< " " << this->mWithDer << ",//With derivative ?\n"
<< " " << this->mSzInterval << "//Size of interv\n"
<< " )\n";
aOs<<
" {\n"
" }\n"
" static std::string FormulaName() { return \"" << aName << "\";}\n"
"protected:\n"
" virtual void DoEval() override;\n"
"private:\n"
" static Super* Alloc(int aSzBuf) {return new " << aClassName << "(aSzBuf); }\n"
" friend void cName2CalcRegisterAll(void);\n"
" static void Register() {cName2Calc<TypeElem>::Register(FormulaName()," << aClassName << "::Alloc);}\n"
"};\n"
"\n";
// --- Body: DoEval definition (separate .cpp unless templated) ---
if (! isTemplated) {
aOs << "} // namespace NS_SymbolicDerivative\n";
aOs = std::ofstream(mDirGenCode+aFileName + ".cpp");
if (!aOs)
return std::make_pair(std::string(),std::string());
aOs << "#include \"" + aFileName + ".h\"\n"
"\n"
"namespace NS_SymbolicDerivative {\n"
"\n"
"void " << aClassName << "::DoEval()\n";
} else {
aOs << "\n"
"template<typename TypeElem>\n"
"void " << aClassName << "<TypeElem>::DoEval()\n";
}
aOs << "{\n"
"#ifdef _OPENMP\n"
"#pragma omp parallel for\n"
"#endif\n"
" for (size_t aK=0; aK < this->mNbInBuf; aK++) {\n"
"// Declare local vars in loop to make them per thread\n";
// References to unknowns and observations, bound per loop iteration.
for (auto & aForm : mVFormUnknowns)
aOs << " " << aTypeName << " &" << aForm->GenCodeFormName() << " = " << aForm->GenCodeDef() << ";\n";
for (const auto & aForm : mVFormObservations)
aOs << " " << aTypeName << " &" << aForm->GenCodeFormName() << " = " << aForm->GenCodeDef() << ";\n";
if (isShortExpr) {
// One local per non-atomic formula, in topological (depth) order.
for (const auto & aForm : mVReachedF) {
if (!aForm->isAtomic())
aOs << " " << aTypeName << " " << aForm->GenCodeFormName() << " = " << aForm->GenCodeShortExpr() << ";\n";
}
for (size_t i=0; i<mVCurF.size(); i++)
aOs << " this->mBufLineRes[aK][" << i << "] = " << mVCurF[i]->GenCodeFormName() << ";\n";
} else {
// Locals only for shared (used more than once) non-atomic formulas;
// single-use formulas are inlined by GenCodeRef at their use site.
for (const auto & aForm : mVReachedF) {
if (aForm->UsedCnt() != 1 && !aForm->isAtomic()) {
aOs << " " << aTypeName << " " << aForm->GenCodeFormName() << " = " << aForm->GenCodeDef() << ";\n";
}
}
for (size_t i=0; i<mVCurF.size(); i++)
aOs << " this->mBufLineRes[aK][" << i << "] = " << mVCurF[i]->GenCodeRef() << ";\n";
}
aOs << " }\n"
"}\n\n";
aOs << "} // namespace NS_SymbolicDerivative\n";
return std::make_pair(aClassName, aFileName);
}
/// TypeElemName(): the C++ spelling of the element type, used by the code
/// generator. Explicit specializations exist for double and float; any other
/// type trips the static_assert below via the always-false detection trait.
template<>
inline std::string cCoordinatorF<double>::TypeElemName() const {return "double";}
template<>
inline std::string cCoordinatorF<float>::TypeElemName() const {return "float";}
/// Always-false trait: instantiated only from the generic TypeElemName(),
/// turning a missing specialization into a readable compile-time error.
template<typename T>
struct Detect_if_TypeElemName_is_defined : std::false_type
{ };
template<class TypeElem>
inline std::string cCoordinatorF<TypeElem>::TypeElemName() const
{
static_assert( Detect_if_TypeElemName_is_defined<TypeElem>::value , "** You must define cCoordinatorF::TypeElemName() for you type **");
return "";
}
} // NS_Symbolic_Derivative
#include "SymbDer_UnaryOp.h"
#include "SymbDer_BinaryOp.h"
/*
https://www.itl.nist.gov/div898/strd/nls/data/ratkowsky3.shtml
http://en.wikipedia.org/wiki/Automatic_differentiation
https://git.irc.umbc.edu/photorig/openMVG/blob/260584fda68dce095e279362efd24a2d7d7cf5d9/src/third_party/ceres-solver/include/ceres/jet.h
https://mc-stan.org/
http://www.met.reading.ac.uk/clouds/adept/array_features.html
http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.89.7749&rep=rep1&type=pdf
http://www.autodiff.org/
*/
#endif // _SymbolicDerivatives_H_
|
overlapping.h | #include "CSC.h"
#include "align.h"
#include "common.h"
#include "../kmercode/hash_funcs.h"
#include "../kmercode/Kmer.hpp"
#include "../kmercode/Buffer.h"
#include "../kmercode/common.h"
#include "../kmercode/fq_reader.h"
#include "../kmercode/ParallelFASTQ.h"
#include <seqan/sequence.h>
#include <seqan/align.h>
#include <seqan/score.h>
#include <seqan/modifier.h>
#include <seqan/seeds.h>
#include <omp.h>
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
#include <cstdlib>
#include <algorithm>
#include <ctype.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <dirent.h>
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <stdint.h>
using namespace seqan;
typedef Seed<Simple> TSeed;
typedef SeedSet<TSeed> TSeedSet;
#define PERCORECACHE (1024 * 1024)
#define TIMESTEP
#ifndef PRINT
#define PRINT
#endif
//#define THREADLIMIT
//#define MAX_NUM_THREAD 1
//#define OSX
//#define LINUX
//#define RAM
#ifdef OSX
#include <mach/mach.h>
#include <mach/vm_statistics.h>
#include <mach/mach_types.h>
#include <mach/mach_init.h>
#include <mach/mach_host.h>
#endif
#ifdef LINUX
#include "sys/types.h"
#include "sys/sysinfo.h"
struct sysinfo info;
#endif
double safety_net = 1.2; // head-room factor applied to the output-memory estimate in HashSpGEMM (20% margin over the exact nnz-based requirement)
/*
Multithreaded prefix sum
Inputs:
in: an input array
size: the length of the input array "in"
nthreads: number of threads used to compute the prefix sum
Output:
return an array of size "size+1"
the memory of the output array is allocated internallay
Example:
in = [2, 1, 3, 5]
out = [0, 2, 3, 6, 11]
*/
// Two-pass parallel prefix sum: each thread sums its static chunk locally,
// then adds the combined totals of all preceding threads' chunks.
// NOTE(review): tsum is sized nthreads+1, so the caller must pass nthreads
// equal to the actual OpenMP team size (e.g. omp_get_max_threads()) or
// tsum[ithread+1] can index out of bounds — confirm at call sites.
// Both omp-for loops use schedule(static) so each thread gets the SAME
// chunk in both passes, which is what makes the offsets line up.
template <typename T>
T* prefixsum(T* in, int size, int nthreads)
{
std::vector<T> tsum(nthreads+1);
tsum[0] = 0;
T* out = new T[size+1]; // caller owns and must delete[]
out[0] = 0;
T* psum = &out[1];
#pragma omp parallel
{
int ithread = omp_get_thread_num();
T sum = 0;
// Pass 1: local running sums per chunk; record each chunk's total.
#pragma omp for schedule(static)
for (int i=0; i<size; i++)
{
sum += in[i];
psum[i] = sum;
}
tsum[ithread+1] = sum;
#pragma omp barrier
// Offset = sum of all previous chunks' totals.
T offset = 0;
for(int i=0; i<(ithread+1); i++)
{
offset += tsum[i];
}
// Pass 2: shift this thread's chunk by the offset.
#pragma omp for schedule(static)
for (int i=0; i<size; i++)
{
psum[i] += offset;
}
}
return out;
}
/* fix according to PAF format */
void toPAF(size_t& begpV, size_t& endpV, const int lenV, size_t& begpH, size_t& endpH, const int lenH, const string& rev)
{
/* first, extend to the end of the sequences */
if(begpH < begpV)
{
begpV = begpV - begpH;
begpH = 0;
}
else
{
begpH = begpH - begpV;
begpV = 0;
}
if((lenH - endpH) < (lenV - endpV))
{
endpV = endpV + (lenH - endpH);
endpH = lenH;
}
else
{
endpH = endpH + (lenV - endpV);
endpV = lenV;
}
/* second, (possibly) convert back the seqH seed position according to the original strand */
if(rev == "c")
{
size_t temp = begpH;
begpH = lenH-endpH;
endpH = lenH-temp;
}
}
// Estimate the number of multiply operations of the SpGEMM C = A*B, per
// column of C. Returns a heap array of size B.cols (caller delete[]s), or
// NULL when either input is empty. With lowtriout=true only products that
// land strictly below the diagonal (row > col) are counted, matching the
// lower-triangular output produced by LocalSpGEMM.
template <typename IT, typename NT>
IT* estimateFLOP(const CSC<IT,NT> & A, const CSC<IT,NT> & B, bool lowtriout)
{
if(A.isEmpty() || B.isEmpty())
{
return NULL;
}
// NOTE(review): numThreads is computed but never used in this function.
int numThreads = 1;
#pragma omp parallel
{
numThreads = omp_get_num_threads();
}
IT* colflopC = new IT[B.cols]; // nnz in every column of C
#pragma omp parallel for
for(IT i=0; i< B.cols; ++i)
{
colflopC[i] = 0;
}
// Each thread owns a disjoint set of output columns, so the += below is race-free.
#pragma omp parallel for
for(IT i=0; i < B.cols; ++i)
{
// NOTE(review): nnzcolB and myThread are unused here.
size_t nnzcolB = B.colptr[i+1] - B.colptr[i]; //nnz in the current column of B
int myThread = omp_get_thread_num();
for (IT j = B.colptr[i]; j < B.colptr[i+1]; ++j) // all nonzeros in that column of B
{
IT col2fetch = B.rowids[j]; // find the row index of that nonzero in B, which is the column to fetch in A
IT nnzcolA = 0;
if(lowtriout)
{
for(IT k = A.colptr[col2fetch]; k < A.colptr[col2fetch+1]; ++k) // all nonzeros in this column of A
{
// i is the column_id of the output and A.rowids[k] is the row_id of the output
if(i < A.rowids[k])
{
++nnzcolA;
}
}
}
else
{
nnzcolA = A.colptr[col2fetch+1]- A.colptr[col2fetch]; // nonzero count of that column of A
}
colflopC[i] += nnzcolA;
}
}
return colflopC;
}
// Estimate the exact number of DISTINCT nonzeros per output column of
// C = A*B using open-addressing hash probing on row indices (a flop lands
// on an existing entry or claims a new slot). Returns a heap array of size
// B.cols (caller delete[]s), or NULL when either input is empty.
// flopC is the per-column flop count from estimateFLOP and sizes each table.
// NOTE(review): the empty-slot sentinel is -1 stored in an IT — this
// presumes IT is a signed type (or that the wrapped value never collides
// with a real row id); confirm against the instantiations used.
template <typename IT, typename NT>
IT* estimateNNZ_Hash(const CSC<IT,NT> & A, const CSC<IT,NT> & B, const size_t *flopC, bool lowtriout)
{
if(A.isEmpty() || B.isEmpty())
{
return NULL;
}
// NOTE(review): numThreads is computed but never used in this function.
int numThreads = 1;
#pragma omp parallel
{
numThreads = omp_get_num_threads();
}
IT* colnnzC = new IT[B.cols]; // nnz in every column of C
#pragma omp parallel for
for(IT i=0; i< B.cols; ++i)
{
colnnzC[i] = 0;
}
#pragma omp parallel for
for(IT i=0; i < B.cols; ++i) // for each column of B
{
// NOTE(review): nnzcolB and myThread are unused here.
size_t nnzcolB = B.colptr[i+1] - B.colptr[i]; //nnz in the current column of B
int myThread = omp_get_thread_num();
// Hash
const size_t minHashTableSize = 16;
const size_t hashScale = 107;
// Initialize hash tables
size_t ht_size = minHashTableSize;
while(ht_size < flopC[i]) //ht_size is set as 2^n
{
ht_size <<= 1;
}
std::vector<IT> globalHashVec(ht_size);
for(size_t j=0; j < ht_size; ++j)
{
globalHashVec[j] = -1;
}
for (IT j = B.colptr[i]; j < B.colptr[i+1]; ++j) // all nonzeros in that column of B
{
IT col2fetch = B.rowids[j]; // find the row index of that nonzero in B, which is the column to fetch in A
for(IT k = A.colptr[col2fetch]; k < A.colptr[col2fetch+1]; ++k) // all nonzeros in this column of A
{
IT key = A.rowids[k];
if(lowtriout && i >= key) // i is the column_id of the output and key is the row_id of the output
continue;
// Power-of-two table size lets & replace the modulo.
IT hash = (key*hashScale) & (ht_size-1);
while (1) //hash probing
{
if (globalHashVec[hash] == key) //key is found in hash table
{
break;
}
else if (globalHashVec[hash] == -1) //key is not registered yet
{
globalHashVec[hash] = key;
colnnzC[i] ++;
break;
}
else //key is not found
{
hash = (hash+1) & (ht_size-1); // don't exit the while loop yet
}
}
}
}
}
return colnnzC;
}
//! Hash based column-by-column spgemm algorithm. Based on earlier code by Buluc, Azad, and Nagasaka
//! If lowtriout= true, then only creates the lower triangular part: no diagonal and no upper triangular
//! Computes columns [start, end) of C = A*B. Per output column i, products
//! are accumulated (addop over multop results) in an open-addressing hash
//! table keyed by output row id, sized from colptrC (the exact per-column
//! nnz from estimateNNZ_Hash), then compacted into RowIdsofC/ValuesofC[i-start].
//! NOTE(review): the -1 empty-slot sentinel presumes a signed IT, as in
//! estimateNNZ_Hash — confirm against the instantiations used.
template <typename IT, typename NT, typename MultiplyOperation, typename AddOperation, typename FT>
void LocalSpGEMM(IT & start, IT & end, const CSC<IT,NT> & A, const CSC<IT,NT> & B, MultiplyOperation multop, AddOperation addop,
vector<IT> * RowIdsofC, vector<FT> * ValuesofC, IT* colptrC, bool lowtriout)
{
#pragma omp parallel for
for(IT i = start; i<end; ++i) // for bcols of B (one block)
{
const IT minHashTableSize = 16;
const IT hashScale = 107;
size_t nnzcolC = colptrC[i+1] - colptrC[i]; //nnz in the current column of C (=Output)
IT ht_size = minHashTableSize;
while(ht_size < nnzcolC) //ht_size is set as 2^n
{
ht_size <<= 1;
}
std::vector< std::pair<IT,FT>> globalHashVec(ht_size);
// Initialize hash tables
for(IT j=0; j < ht_size; ++j)
{
globalHashVec[j].first = -1;
}
for (IT j = B.colptr[i]; j < B.colptr[i+1]; ++j) // all nonzeros in that column of B
{
IT col2fetch = B.rowids[j]; // find the row index of that nonzero in B, which is the column to fetch in A
NT valueofB = B.values[j];
for(IT k = A.colptr[col2fetch]; k < A.colptr[col2fetch+1]; ++k) // all nonzeros in this column of A
{
IT key = A.rowids[k];
if(lowtriout && i >= key) // i is the column_id of the output and key is the row_id of the output
continue;
FT result = multop(A.values[k], valueofB);
// Power-of-two table size lets & replace the modulo.
IT hash = (key*hashScale) & (ht_size-1);
while (1) //hash probing
{
if (globalHashVec[hash].first == key) //key is found in hash table
{
globalHashVec[hash].second = addop(result, globalHashVec[hash].second);
break;
}
else if (globalHashVec[hash].first == -1) //key is not registered yet
{
globalHashVec[hash].first = key;
globalHashVec[hash].second = result;
break;
}
else //key is not found
{
hash = (hash+1) & (ht_size-1); // don't exit the while loop yet
}
}
}
}
// gather non-zero elements from hash table (and then sort them by row indices if needed)
IT index = 0;
for (IT j=0; j < ht_size; ++j)
{
if (globalHashVec[j].first != -1)
{
globalHashVec[index++] = globalHashVec[j];
}
}
#ifdef SORTCOLS
std::sort(globalHashVec.begin(), globalHashVec.begin() + index, sort_less<IT, NT>);
#endif
RowIdsofC[i-start].resize(index);
ValuesofC[i-start].resize(index);
for (IT j=0; j< index; ++j)
{
RowIdsofC[i-start][j] = globalHashVec[j].first;
ValuesofC[i-start][j] = globalHashVec[j].second;
}
}
}
// Return the memory budget (in bytes) BELLA may use when forming the SpGEMM
// output. If the user supplied a budget (b_pars.userDefMem), honor it;
// otherwise query the OS (macOS via Mach VM statistics, Linux via sysinfo),
// falling back to BELLA's default budget when no platform estimate exists.
// Returns 0.0 when the Linux sysinfo query fails (and now also when the
// macOS Mach calls fail, instead of returning an uninitialized value).
double estimateMemory(const BELLApars & b_pars)
{
    // Fix: initialize — the OSX branch leaves free_memory unset when the
    // Mach calls fail, and the original then returned an indeterminate value.
    double free_memory = 0.0;
    if (b_pars.userDefMem)
    {
        free_memory = b_pars.totalMemory * 1024 * 1024;
    }
    else
    {
#if defined (OSX) // OSX-based memory consumption implementation
        vm_size_t page_size;
        mach_port_t mach_port;
        mach_msg_type_number_t count;
        vm_statistics64_data_t vm_stats;
        mach_port = mach_host_self();
        count = sizeof(vm_stats) / sizeof(natural_t);
        if (KERN_SUCCESS == host_page_size(mach_port, &page_size) &&
            KERN_SUCCESS == host_statistics64(mach_port, HOST_VM_INFO,
                                              (host_info64_t)&vm_stats, &count))
        {
            free_memory = (double) vm_stats.free_count * (double)page_size;
        }
#elif defined (LINUX) // LINUX-based memory consumption implementation
        if(sysinfo(&info) != 0)
        {
            // Fix: was "return false;" — a bool from a double-returning
            // function; same value (0.0), now stated explicitly.
            return 0.0;
        }
        free_memory  = info.freeram * info.mem_unit;
        free_memory += info.freeswap * info.mem_unit;
        free_memory += info.bufferram * info.mem_unit;
#else
        free_memory = b_pars.totalMemory * 1024 * 1024; // memory is neither user-supplied nor can be estimated, so use BELLA's default
#endif
    }
    return free_memory;
}
// Decide whether an aligned read pair passes BELLA's filtering, and if so
// append one output line (BELLA or PAF format) to this thread's batch.
// The overlap length ov is estimated from the seed extent plus the shorter
// overhang on each side. With the adaptive threshold (adapThr) the score
// must exceed (1-deltaChernoff)*ratioPhi*ov; otherwise a fixed defaultThr
// is used. alignEnd additionally requires the alignment to reach the read
// ends (toEnd). Outputs: passed, outputted, and the aligned-base tallies
// numBasesAlignedTrue/False are updated in place.
void PostAlignDecision(const seqAnResult & maxExtScore, const readType_ & read1, const readType_ & read2,
const BELLApars & b_pars, double ratioPhi, int count, stringstream & myBatch, size_t & outputted,
size_t & numBasesAlignedTrue, size_t & numBasesAlignedFalse, bool & passed)
{
auto maxseed = maxExtScore.seed; // returns a seqan:Seed object
// {begin/end}Position{V/H}: Returns the begin/end position of the seed in the query (vertical/horizonral direction)
// these four return seqan:Tposition objects
auto begpV = beginPositionV(maxseed);
auto endpV = endPositionV(maxseed);
auto begpH = beginPositionH(maxseed);
auto endpH = endPositionH(maxseed);
// get references for better naming
const string& seq1 = read1.seq;
const string& seq2 = read2.seq;
int read1len = seq1.length();
int read2len = seq2.length();
// Overlap estimate: seed extent (averaged over both reads) plus the
// shorter overhang on each flank.
int diffCol = endpV - begpV;
int diffRow = endpH - begpH;
int minLeft = min(begpV, begpH);
int minRight = min(read2len - endpV, read1len - endpH);
int ov = minLeft+minRight+(diffCol+diffRow)/2;
if(b_pars.adapThr)
{
// Chernoff-style adaptive cutoff scaled by the estimated overlap.
double newThr = (1-b_pars.deltaChernoff)*(ratioPhi*(double)ov);
if((double)maxExtScore.score > newThr)
{
if(b_pars.alignEnd)
{
if(toEnd(begpV, endpV, read2len, begpH, endpH, read1len, b_pars.relaxMargin))
passed = true;
}
else
{
passed = true;
}
}
}
else if(maxExtScore.score > b_pars.defaultThr)
{
if(b_pars.alignEnd)
{
if(toEnd(begpV, endpV, read2len, begpH, endpH, read1len, b_pars.relaxMargin))
passed = true;
}
else
{
passed = true;
}
}
if(passed)
{
if(!b_pars.outputPaf) // BELLA output format
{
myBatch << read2.nametag << '\t' << read1.nametag << '\t' << count << '\t' << maxExtScore.score << '\t' << ov << '\t' << maxExtScore.strand << '\t' <<
begpV << '\t' << endpV << '\t' << read2len << '\t' << begpH << '\t' << endpH << '\t' << read1len << endl;
// column seq name
// row seq name
// number of shared k-mer
// alignment score
// overlap estimation
// strand (n/c)
// column seq start
// column seq end
// column seq length
// row seq start
// row seq end
// row seq length
}
else // PAF format is the output format used by minimap/minimap2: https://github.com/lh3/miniasm/blob/master/PAF.md
{
/* field adjustment to match the PAF format */
toPAF(begpV, endpV, read2len, begpH, endpH, read1len, maxExtScore.strand);
/* re-compute overlap estimation with extended alignment to the edges */
diffCol = endpV - begpV;
diffRow = endpH - begpH;
minLeft = min(begpV, begpH);
minRight = min(read2len - endpV, read1len - endpH);
ov = minLeft+minRight+(diffCol+diffRow)/2;
string pafstrand; // maxExtScore not modifiable
int mapq = 255; // mapping quality (0-255; 255 for missing)
if(maxExtScore.strand == "n") pafstrand = "+";
else pafstrand = "-";
// If PAF is generated from an alignment, column 10 equals the number of sequence matches,
// and column 11 equals the total number of sequence matches, mismatches, insertions and deletions in the alignment
myBatch << read2.nametag << '\t' << read2len << '\t' << begpV << '\t' << endpV << '\t' << pafstrand << '\t' <<
read1.nametag << '\t' << read1len << '\t' << begpH << '\t' << endpH << '\t' << maxExtScore.score << '\t' << ov << '\t' << mapq << endl;
// column seq name
// column seq length
// column seq start
// column seq end
// strand (+/-)
// row seq name
// row seq length
// row seq start
// row seq end
// number of residue matches (alignment score)
// alignment block length (overlap length)
// mapping quality (0-255; 255 for missing)
}
++outputted;
numBasesAlignedTrue += (endpV-begpV);
}
else
{
numBasesAlignedFalse += (endpV-begpV);
}
}
// For columns [start, end) of the candidate-pair matrix, run the pairwise
// alignments (unless skipAlignment), buffer each thread's output lines in a
// per-thread stringstream, then write all batches to `filename` in parallel
// at precomputed offsets. Returns (alignedpairs, alignedbases, totalreadlen,
// outputted, succ-bases, fail-bases, output-time); the first stats are only
// accumulated when TIMESTEP is defined.
// NOTE(review): the inner `int i, j` shadow the loop variables i/j above —
// intentional here, but fragile.
// NOTE(review): ofs.seekp(bytestotal - 1) underflows when no thread produced
// any output (bytestotal == 0) — confirm that case cannot occur upstream.
template <typename IT, typename FT>
auto RunPairWiseAlignments(IT start, IT end, IT offset, IT * colptrC, IT * rowids, FT * values, const readVector_ & reads,
int kmer_len, int xdrop, char* filename, const BELLApars & b_pars, double ratioPhi)
{
size_t alignedpairs = 0;
size_t alignedbases = 0;
size_t totalreadlen = 0;
size_t totaloutputt = 0;
size_t totsuccbases = 0;
size_t totfailbases = 0;
int numThreads = 1;
#pragma omp parallel
{
numThreads = omp_get_num_threads();
}
vector<stringstream> vss(numThreads); // any chance of false sharing here? depends on how stringstream is implemented. optimize later if needed...
#pragma omp parallel for
for(IT j = start; j<end; ++j) // for (end-start) columns of A^T A (one block)
{
size_t numAlignmentsThread = 0;
size_t numBasesAlignedThread = 0;
size_t readLengthsThread = 0;
size_t numBasesAlignedTrue = 0;
size_t numBasesAlignedFalse = 0;
size_t outputted = 0;
int ithread = omp_get_thread_num();
for (IT i = colptrC[j]; i < colptrC[j+1]; ++i) // all nonzeros in that column of A^T A
{
size_t rid = rowids[i-offset]; // row id
size_t cid = j; // column id
const string& seq1 = reads[rid].seq; // get reference for readibility
const string& seq2 = reads[cid].seq; // get reference for readibility
int seq1len = seq1.length();
int seq2len = seq2.length();
spmatPtr_ val = values[i-offset];
if(!b_pars.skipAlignment) // fix -z to not print
{
#ifdef TIMESTEP
numAlignmentsThread++;
readLengthsThread = readLengthsThread + seq1len + seq2len;
#endif
seqAnResult maxExtScore;
bool passed = false;
if(val->count == 1)
{
// Single shared k-mer: one seed-and-extend attempt.
auto it = val->pos.begin();
int i = it->first, j = it->second;
maxExtScore = alignSeqAn(seq1, seq2, seq1len, i, j, xdrop, kmer_len);
PostAlignDecision(maxExtScore, reads[rid], reads[cid], b_pars, ratioPhi, val->count, vss[ithread], outputted, numBasesAlignedTrue, numBasesAlignedFalse, passed);
}
else
{
// Multiple seeds: try each until one passes the filter.
for(auto it = val->pos.begin(); it != val->pos.end(); ++it) // if !b_pars.allKmer this should be at most two cycle
{
int i = it->first, j = it->second;
maxExtScore = alignSeqAn(seq1, seq2, seq1len, i, j, xdrop, kmer_len);
PostAlignDecision(maxExtScore, reads[rid], reads[cid], b_pars, ratioPhi, val->count, vss[ithread], outputted, numBasesAlignedTrue, numBasesAlignedFalse, passed);
if(passed)
break;
}
}
#ifdef TIMESTEP
numBasesAlignedThread += endPositionV(maxExtScore.seed)-beginPositionV(maxExtScore.seed);
#endif
}
else // if skipAlignment == false do alignment, else save just some info on the pair to file
{
vss[ithread] << reads[cid].nametag << '\t' << reads[rid].nametag << '\t' << val->count << '\t' <<
seq2len << '\t' << seq1len << endl;
++outputted;
}
} // all nonzeros in that column of A^T A
#ifdef TIMESTEP
#pragma omp critical
{
alignedpairs += numAlignmentsThread;
alignedbases += numBasesAlignedThread;
totalreadlen += readLengthsThread;
totaloutputt += outputted;
totsuccbases += numBasesAlignedTrue;
totfailbases += numBasesAlignedFalse;
}
#endif
} // all columns from start...end (omp for loop)
double outputting = omp_get_wtime();
// Measure each thread's batch size, then pre-extend the file so every
// thread can write its slice at a fixed offset without contention.
int64_t * bytes = new int64_t[numThreads];
for(int i=0; i< numThreads; ++i)
{
vss[i].seekg(0, ios::end);
bytes[i] = vss[i].tellg();
vss[i].seekg(0, ios::beg);
}
int64_t bytestotal = std::accumulate(bytes, bytes+numThreads, static_cast<int64_t>(0));
std::ofstream ofs(filename, std::ios::binary | std::ios::app);
#ifdef PRINT
cout << "Creating or appending to output file with " << (double)bytestotal/(double)(1024 * 1024) << " MB" << endl;
#endif
ofs.seekp(bytestotal - 1);
ofs.write("", 1); // this will likely create a sparse file so the actual disks won't spin yet
ofs.close();
#pragma omp parallel
{
int ithread = omp_get_thread_num();
FILE *ffinal;
if ((ffinal = fopen(filename, "rb+")) == NULL) // then everyone fills it
{
fprintf(stderr, "File %s failed to open at thread %d\n", filename, ithread);
}
// Offset of this thread's slice = total size of all preceding batches.
int64_t bytesuntil = std::accumulate(bytes, bytes+ithread, static_cast<int64_t>(0));
fseek (ffinal , bytesuntil , SEEK_SET );
std::string text = vss[ithread].str();
fwrite(text.c_str(),1, bytes[ithread] ,ffinal);
fflush(ffinal);
fclose(ffinal);
}
delete [] bytes;
double timeoutputt = omp_get_wtime()-outputting;
return make_tuple(alignedpairs, alignedbases, totalreadlen, totaloutputt, totsuccbases, totfailbases, timeoutputt);
}
/**
* Sparse multithreaded GEMM (lower-triangular output only).
* Pipeline: estimate flops and exact output nnz per column, derive column
* prefix sums, then split the columns into memory-bounded stages. Each stage
* runs LocalSpGEMM to form its slice of C and immediately feeds it to
* RunPairWiseAlignments, so the full output matrix never has to fit in RAM.
* The stage boundaries are chosen with upper_bound on the nnz prefix sum so
* every stage fits in free_memory / safety_net bytes.
**/
template <typename IT, typename NT, typename FT, typename MultiplyOperation, typename AddOperation>
void HashSpGEMM(const CSC<IT,NT> & A, const CSC<IT,NT> & B, MultiplyOperation multop, AddOperation addop, const readVector_ & reads,
FT & getvaluetype, int kmer_len, int xdrop, char* filename, const BELLApars & b_pars, double ratioPhi)
{
double free_memory = estimateMemory(b_pars);
#ifdef PRINT
cout << "Available RAM is assumed to be: " << free_memory / (1024 * 1024) << " MB" << endl;
#endif
int numThreads = 1;
#pragma omp parallel
{
numThreads = omp_get_num_threads();
}
IT* flopC = estimateFLOP(A, B, true);
IT* flopptr = prefixsum<IT>(flopC, B.cols, numThreads);
IT flops = flopptr[B.cols];
#ifdef PRINT
cout << "FLOPS is " << flops << endl;
#endif
IT* colnnzC = estimateNNZ_Hash(A, B, flopC, true);
IT* colptrC = prefixsum<IT>(colnnzC, B.cols, numThreads); // colptrC[i] = rolling sum of nonzeros in C[1...i]
delete [] colnnzC;
delete [] flopptr;
delete [] flopC;
IT nnzc = colptrC[B.cols];
// NOTE(review): compression_ratio is computed but never used.
double compression_ratio = (double)flops / nnzc;
uint64_t required_memory = safety_net * nnzc * (sizeof(FT)+sizeof(IT)); // required memory to form the output
int stages = std::ceil((double) required_memory/ free_memory); // form output in stages
uint64_t nnzcperstage = free_memory / (safety_net * (sizeof(FT)+sizeof(IT)));
#ifdef PRINT
cout << "nnz(output): " << nnzc << " | free memory: " << free_memory << " | required memory: " << required_memory << endl;
cout << "Stages: " << stages << " | max nnz per stage: " << nnzcperstage << endl;
#endif
IT * colStart = new IT[stages+1]; // one array is enough to set stage boundaries
colStart[0] = 0;
for(int i = 1; i < stages; ++i) // colsPerStage is no longer fixed (helps with potential load imbalance)
{
// std::upper_bound returns an iterator pointing to the first element
// in the range [first, last) that is greater than value, or last if no such element is found
auto upper = std::upper_bound(colptrC, colptrC+B.cols+1, i*nnzcperstage );
colStart[i] = upper - colptrC - 1; // we don't want the element that exceeds our budget, we want the one just before that
}
colStart[stages] = B.cols;
for(int b = 0; b < stages; ++b)
{
#ifdef TIMESTEP
double ovl = omp_get_wtime();
#endif
vector<IT> * RowIdsofC = new vector<IT>[colStart[b+1]-colStart[b]]; // row ids for each column of C (bunch of cols)
vector<FT> * ValuesofC = new vector<FT>[colStart[b+1]-colStart[b]]; // values for each column of C (bunch of cols)
LocalSpGEMM(colStart[b], colStart[b+1], A, B, multop, addop, RowIdsofC, ValuesofC, colptrC, true);
#ifdef TIMESTEP
double ov2 = omp_get_wtime();
cout << "\nColumns [" << colStart[b] << " - " << colStart[b+1] << "] overlap time: " << ov2-ovl << "s" << endl;
#endif
// Flatten this stage's per-column vectors into contiguous CSC-style arrays.
IT endnz = colptrC[colStart[b+1]];
IT begnz = colptrC[colStart[b]];
IT * rowids = new IT[endnz-begnz];
FT * values = new FT[endnz-begnz];
for(IT i=colStart[b]; i<colStart[b+1]; ++i) // combine step
{
IT loccol = i-colStart[b];
IT locnz = colptrC[i]-begnz;
copy(RowIdsofC[loccol].begin(), RowIdsofC[loccol].end(), rowids + locnz);
copy(ValuesofC[loccol].begin(), ValuesofC[loccol].end(), values + locnz);
}
delete [] RowIdsofC;
delete [] ValuesofC;
tuple<size_t, size_t, size_t, size_t, size_t, size_t, double> alignstats; // (alignedpairs, alignedbases, totalreadlen, outputted, alignedtrue, alignedfalse, timeoutputt)
alignstats = RunPairWiseAlignments(colStart[b], colStart[b+1], begnz, colptrC, rowids, values, reads, kmer_len, xdrop, filename, b_pars, ratioPhi);
#ifdef TIMESTEP
if(!b_pars.skipAlignment)
{
double elapsed = omp_get_wtime()-ov2;
double aligntime = elapsed-get<6>(alignstats); // substracting outputting time
cout << "\nColumns [" << colStart[b] << " - " << colStart[b+1] << "] alignment time: " << aligntime << "s | alignment rate: " << static_cast<double>(get<1>(alignstats))/aligntime;
cout << " bases/s | average read length: " <<static_cast<double>(get<2>(alignstats))/(2* get<0>(alignstats));
cout << " | read pairs aligned this stage: " << get<0>(alignstats) << endl;
cout << "Average length of successful alignment " << static_cast<double>(get<4>(alignstats)) / get<3>(alignstats) << " bps" << endl;
cout << "Average length of failed alignment " << static_cast<double>(get<5>(alignstats)) / (get<0>(alignstats) - get<3>(alignstats)) << " bps" << endl;
}
#endif
cout << "\nOutputted " << get<3>(alignstats) << " lines in " << get<6>(alignstats) << "s" << endl;
delete [] rowids;
delete [] values;
}//for(int b = 0; b < states; ++b)
delete [] colptrC;
delete [] colStart;
}
|
GB_binop__pair_fc32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__pair_fc32)
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__pair_fc32)
// C+=b function (dense accum): GB (_Cdense_accumb__pair_fc32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pair_fc32)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// A pattern? 1
// B type: GxB_FC32_t
// B pattern? 1
// BinaryOp: cij = GxB_CMPLXF(1,0)
// Type and operator plumbing consumed by the GB_* templates #included below.
// The PAIR operator ignores both operand values (z is always GxB_CMPLXF(1,0)),
// so A's and B's values are never read: GB_GETA/GB_GETB are no-ops and the
// *_IS_PATTERN flags are 1.
#define GB_ATYPE \
    GxB_FC32_t

#define GB_BTYPE \
    GxB_FC32_t

#define GB_CTYPE \
    GxB_FC32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA] -- intentionally a no-op: PAIR never reads A's values
#define GB_GETA(aij,Ax,pA,A_iso) \
    ;

// true if values of A are not used
// (fix: a stray trailing '\' after the '1' used to line-splice the next
// comment line into this macro body, hiding the GB_GETB doc comment)
#define GB_A_IS_PATTERN \
    1

// bij = Bx [pB] -- intentionally a no-op: PAIR never reads B's values
#define GB_GETB(bij,Bx,pB,B_iso) \
    ;

// true if values of B are not used
// (same stray-backslash fix as GB_A_IS_PATTERN above)
#define GB_B_IS_PATTERN \
    1

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    GxB_FC32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator: PAIR always produces 1 (as single-precision complex)
#define GB_BINOP(z,x,y,i,j) \
    z = GxB_CMPLXF(1,0) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_PAIR || GxB_NO_FC32 || GxB_NO_PAIR_FC32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A and B are all dense, no accumulator, no mask.
// The loop lives in the shared template; it applies this file's GB_BINOP
// (z = GxB_CMPLXF(1,0)), so every entry of C is set to 1+0i.
void GB (_Cdense_ewise3_noaccum__pair_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, via the
// dense-subassign template.  B_ek_slicing describes the precomputed task
// partition of B's entries across B_ntasks tasks / B_nthreads threads.
// Returns GrB_NO_VALUE when this operator is compiled out (GB_DISABLE).
GrB_Info GB (_Cdense_accumB__pair_fc32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b (passed type-erased via p_bwork) into a
// dense matrix C, via the dense-subassign template.
GrB_Info GB (_Cdense_accumb__pair_fc32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type GxB_FC32_t
GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable -- the block above always returns first;
// harmless artifact of the code generator.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B, driven by GB_add_template.c.
// TaskList/C_ntasks/C_nthreads carry the precomputed parallel schedule;
// C_to_M/C_to_A/C_to_B map C's vectors back to M/A/B for hypersparse cases.
// Returns GrB_NO_VALUE if this operator is compiled out (GB_DISABLE).
GrB_Info GB (_AaddB__pair_fc32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
GxB_FC32_t alpha_scalar ;
GxB_FC32_t beta_scalar ;
// eWiseUnion supplies "fill" scalars for entries present in only one input;
// they are only valid (and only unpacked) when is_eWiseUnion is true.
if (is_eWiseUnion)
{
alpha_scalar = (*((GxB_FC32_t *) alpha_scalar_in)) ;
beta_scalar = (*((GxB_FC32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
GxB_FC32_t x = (*((GxB_FC32_t *) x_input)) ;
GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
; ;
Cx [p] = GxB_CMPLXF(1,0) ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ;
GxB_FC32_t y = (*((GxB_FC32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
; ;
Cx [p] = GxB_CMPLXF(1,0) ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = GxB_CMPLXF(1,0) ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC32_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = GxB_CMPLXF(1,0) ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
7.norace4.c | // RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s
#include <omp.h>
#define M 20
#define N 20
// Race-checker fixture: subtract the per-column mean from each element.
// Each parallel iteration owns exactly one row of `data`, so there is no
// cross-thread write sharing and the region is data-race free.
int main() {
  double data[M][N], mean[N];
  // NOTE(review): data/mean are uninitialized; this fixture only exercises
  // static race analysis, the values are never observed.
  // Fix: loop bounds now match the array dimensions -- `data` is data[M][N],
  // so i ranges over M rows and j over the N columns of `mean`.  The
  // original `i < N` / `j < M` only worked because M == N.
#pragma omp parallel for
  for (int i = 0; i < M; i++)
    for (int j = 0; j < N; j++) {
      data[i][j] -= mean[j];
    }
}
// CHECK: Region is Data Race Free.
// END
|
GB_binop__isgt_int16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isgt_int16)
// A.*B function (eWiseMult): GB (_AemultB_01__isgt_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__isgt_int16)
// A.*B function (eWiseMult): GB (_AemultB_03__isgt_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isgt_int16)
// A*D function (colscale): GB (_AxD__isgt_int16)
// D*A function (rowscale): GB (_DxB__isgt_int16)
// C+=B function (dense accum): GB (_Cdense_accumB__isgt_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__isgt_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isgt_int16)
// C=scalar+B GB (_bind1st__isgt_int16)
// C=scalar+B' GB (_bind1st_tran__isgt_int16)
// C=A+scalar GB (_bind2nd__isgt_int16)
// C=A'+scalar GB (_bind2nd_tran__isgt_int16)
// C type: int16_t
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = (aij > bij)
// Type and operator plumbing consumed by the GB_* templates #included below.
// ISGT over int16: cij = (aij > bij), with the boolean result stored back
// as an int16_t (0 or 1).
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int16_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x > y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISGT || GxB_NO_INT16 || GxB_NO_ISGT_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A and B are all dense, no accumulator, no mask; the
// shared template applies this file's GB_BINOP (cij = (aij > bij)).
// Returns GrB_NO_VALUE when this operator is compiled out (GB_DISABLE).
GrB_Info GB (_Cdense_ewise3_noaccum__isgt_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C via the
// dense-subassign template.  B_ek_slicing carries the precomputed task
// partition of B's entries (B_ntasks tasks over B_nthreads threads).
GrB_Info GB (_Cdense_accumB__isgt_int16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b (type-erased via p_bwork) into a dense
// matrix C via the dense-subassign template.
GrB_Info GB (_Cdense_accumb__isgt_int16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable -- the block above always returns first;
// harmless artifact of the code generator.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, via the
// colscale template.  A_ek_slicing carries the parallel task partition.
GrB_Info GB (_AxD__isgt_int16)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// template writes results directly into C's value array
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, via the
// rowscale template.
GrB_Info GB (_DxB__isgt_int16)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// template writes results directly into C's value array
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B, driven by GB_add_template.c.
// TaskList/C_ntasks/C_nthreads carry the precomputed parallel schedule;
// C_to_M/C_to_A/C_to_B map C's vectors back to M/A/B for hypersparse cases.
GrB_Info GB (_AaddB__isgt_int16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace declared here is released by GB_FREE_WORK below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B where C is sparse/hypersparse,
// driven by the emult_01 meta-template.
GrB_Info GB (_AemultB_01__isgt_int16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// The flipxy machinery only matters for non-commutative ops with no
// flipped variant; for this file GB_BINOP_FLIP is 0, so the single
// unflipped template instantiation below is used.
GrB_Info GB (_AemultB_02__isgt_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B with M sparse/hyper and A, B bitmap/full,
// driven by the emult_03 template.  The mask M supplies C's pattern.
GrB_Info GB (_AemultB_03__isgt_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap,
// driven by the bitmap emult template.
GrB_Info GB (_AemultB_bitmap__isgt_int16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the operator with the scalar bound as the first
// argument, i.e. Cx [p] = (x > Bx [p]) for every entry present in B.
// Bb is B's bitmap (may be NULL per GBB); entries absent from the bitmap
// are skipped.  Cx and Bx may alias -- each p is read before written.
GrB_Info GB (_bind1st__isgt_int16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *Cx = (int16_t *) Cx_output ;
int16_t x = (*((int16_t *) x_input)) ;
int16_t *Bx = (int16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int16_t bij = GBX (Bx, p, false) ;
Cx [p] = (x > bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the operator with the scalar bound as the second
// argument, i.e. Cx [p] = (Ax [p] > y) for every entry present in A.
// Ab is A's bitmap (may be NULL per GBB); absent entries are skipped.
GrB_Info GB (_bind2nd__isgt_int16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int16_t *Cx = (int16_t *) Cx_output ;
int16_t *Ax = (int16_t *) Ax_input ;
int16_t y = (*((int16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int16_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij > y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// C = op (x, A'): transpose A and apply the operator with the scalar bound
// first: cij = (x > aij).  GB_CAST_OP is what GB_unop_transpose.c invokes
// per entry ("no typecasting, in spite of the macro name").
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x > aij) ; \
}
GrB_Info GB (_bind1st_tran__isgt_int16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for the remainder of the file (generator artifact:
// here it happens to redefine it to the same type)
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// C = op (A', y): transpose A and apply the operator with the scalar bound
// second: cij = (aij > y).  GB_CAST_OP is what GB_unop_transpose.c invokes
// per entry ("no typecasting, in spite of the macro name").
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij > y) ; \
}
GrB_Info GB (_bind2nd_tran__isgt_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
pi-v3.c | /*
* Compute pi by approximating the area under the curve f(x) = 4 / (1 + x*x)
* between 0 and 1.
*
* parallel version using OpenMP
*/
#include <stdio.h>
#include <stdlib.h>
#include <omp.h> /* OpenMP */
// Build-mode selection: timing mode vs Extrae instrumentation mode.
// NOTE(review): `#if _DEBUG_` evaluates an *undefined* _DEBUG_ as 0, so a
// plain build always takes the #else branch (Extrae); timing mode requires
// compiling with -D_DEBUG_=1.  `#ifdef _DEBUG_` was probably intended --
// confirm before changing, since the #else branch also pulls in the Extrae
// header and the event IDs used below.
#if _DEBUG_
#define _DEBUG_ 1
#else
#define _DEBUG_ 0
#include "extrae_user_events.h"
// Extrae event type and values used to bracket the pi computation
#define PROGRAM 1000
#define PI_COMPUTATION 1
#define END 0
#endif
/*
 * Entry point: approximate pi as the midpoint-rule integral of
 * 4/(1+x*x) over [0,1] with num_steps intervals (argv[1]), distributing
 * iterations cyclically across OpenMP threads.
 *
 * Fix for the race the original flagged as "WARNING : incorrect code":
 * `sum` was a shared variable updated with an unsynchronized `sum +=`
 * from every thread.  reduction(+:sum) gives each thread a private,
 * zero-initialized accumulator that OpenMP combines on region exit,
 * which preserves the manual interleaved loop while making the result
 * deterministic and race-free.
 */
int main(int argc, char *argv[]) {
  double x, sum = 0.0, pi = 0.0;
#if _DEBUG_
  double start, end;
#endif
  int i;
  const char Usage[] = "Usage: pi <num_steps> (try 1000000000)\n";

  if (argc < 2) {
    fprintf(stderr, Usage);
    exit(1);
  }
  int num_steps = atoi(argv[1]);
  double step = 1.0/(double) num_steps;

#if _DEBUG_
  start = omp_get_wtime();
#else
  Extrae_event (PROGRAM, PI_COMPUTATION);
#endif

  /* do computation -- using all available threads */
#pragma omp parallel private(i, x) reduction(+:sum)
  {
    int id = omp_get_thread_num();
    int num_threads = omp_get_num_threads();
    /* interleaved (cyclic) distribution of iterations among threads */
    for (i = id; i < num_steps; i = i + num_threads) {
      x = (i+0.5)*step;
      sum += 4.0/(1.0+x*x);
#if _DEBUG_
      printf("thread id:%d it:%d\n",id,i);
#endif
    }
  }
  pi = step * sum;

#if _DEBUG_
  end = omp_get_wtime();
  printf("Wall clock execution time = %.9f seconds\n", end-start);
#else
  Extrae_event (PROGRAM, END);
#endif

  /* print results */
  printf("Value of pi = %12.10f\n", pi);
  return EXIT_SUCCESS;
}
|
opi.c | #include <stdlib.h>
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_max_threads() 1
#endif
/*
 * Entry point: approximate pi by midpoint-rule integration of 4/(1+x*x)
 * over [0,1], with argv[1] intervals, timing the computation.
 * The loop already uses a correct OpenMP reduction on `sum`.
 *
 * Cleanup: dropped the `register` storage class -- it is a no-op for
 * modern optimizers and was removed outright in C++17.
 */
int main(int argc, char **argv)
{
    double width, x;
    double sum = 0;
    int intervals, i;
    double t;

    if (argc < 2) {
        fprintf(stderr,"\nFalta nº intervalos\n");
        exit(-1);
    }
    intervals = atoi(argv[1]);
    if (intervals < 1) intervals = 1;   /* guard against 0/negative input */

    t = omp_get_wtime();
    width = 1.0 / intervals;
#pragma omp parallel
    {
        /* loop index is implicitly private in the omp for; x made private */
#pragma omp for reduction(+:sum) private(x)
        for (i = 0; i < intervals; i++) {
            x = (i + 0.5) * width;
            sum += 4.0 / (1.0 + x * x);
        }
    }
    sum *= width;
    t = omp_get_wtime() - t;
    printf("Iteraciones: %d.\t Pi: %26.24f.\t Threads: %d.\t Tiempo:\t%8.6f\n",
           intervals,sum,omp_get_max_threads(),t);
    return(0);
}
|
image.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% IIIII M M AAA GGGG EEEEE %
% I MM MM A A G E %
% I M M M AAAAA G GG EEE %
% I M M A A G G E %
% IIIII M M A A GGGG EEEEE %
% %
% %
% MagickCore Image Methods %
% %
% Software Design %
% John Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/animate.h"
#include "magick/artifact.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/client.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/compress.h"
#include "magick/constitute.h"
#include "magick/deprecate.h"
#include "magick/display.h"
#include "magick/draw.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/histogram.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/magic.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/module.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/paint.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/profile.h"
#include "magick/property.h"
#include "magick/quantize.h"
#include "magick/random_.h"
#include "magick/resource_.h"
#include "magick/segment.h"
#include "magick/semaphore.h"
#include "magick/signature-private.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/threshold.h"
#include "magick/timer.h"
#include "magick/token.h"
#include "magick/utility.h"
#include "magick/version.h"
#include "magick/xwindow-private.h"
/*
Constant declaration.
*/
const char
BackgroundColor[] = "#ffffff", /* white */
BorderColor[] = "#dfdfdf", /* gray */
DefaultTileFrame[] = "15x15+3+3",
DefaultTileGeometry[] = "120x120+4+3>",
DefaultTileLabel[] = "%f\n%G\n%b",
ForegroundColor[] = "#000", /* black */
LoadImageTag[] = "Load/Image",
LoadImagesTag[] = "Load/Images",
MatteColor[] = "#bdbdbd", /* gray */
PSDensityGeometry[] = "72.0x72.0",
PSPageGeometry[] = "612x792",
SaveImageTag[] = "Save/Image",
SaveImagesTag[] = "Save/Images",
TransparentColor[] = "#00000000"; /* transparent black */
const double
DefaultResolution = 72.0;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImage() returns a pointer to an image structure initialized to
% default values.
%
% The format of the AcquireImage method is:
%
% Image *AcquireImage(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: Many of the image default values are set from this
% structure. For example, filename, compression, depth, background color,
% and others.
%
*/
MagickExport Image *AcquireImage(const ImageInfo *image_info)
{
  const char
    *option;

  Image
    *image;

  MagickStatusType
    flags;

  /*
    Allocate image structure.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  image=(Image *) AcquireMagickMemory(sizeof(*image));
  if (image == (Image *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(image,0,sizeof(*image));
  /*
    Initialize Image structure with library defaults (MIFF format, sRGB
    colorspace, Rec. 709-style primaries and D65 white point).
  */
  (void) CopyMagickString(image->magick,"MIFF",MaxTextExtent);
  image->storage_class=DirectClass;
  image->depth=MAGICKCORE_QUANTUM_DEPTH;
  image->colorspace=sRGBColorspace;
  image->rendering_intent=PerceptualIntent;
  image->gamma=1.000f/2.200f;
  image->chromaticity.red_primary.x=0.6400f;
  image->chromaticity.red_primary.y=0.3300f;
  image->chromaticity.red_primary.z=0.0300f;
  image->chromaticity.green_primary.x=0.3000f;
  image->chromaticity.green_primary.y=0.6000f;
  image->chromaticity.green_primary.z=0.1000f;
  image->chromaticity.blue_primary.x=0.1500f;
  image->chromaticity.blue_primary.y=0.0600f;
  image->chromaticity.blue_primary.z=0.7900f;
  image->chromaticity.white_point.x=0.3127f;
  image->chromaticity.white_point.y=0.3290f;
  image->chromaticity.white_point.z=0.3583f;
  image->interlace=NoInterlace;
  image->ticks_per_second=UndefinedTicksPerSecond;
  image->compose=OverCompositeOp;
  image->blur=1.0;
  GetExceptionInfo(&image->exception);
  (void) QueryColorDatabase(BackgroundColor,&image->background_color,
    &image->exception);
  (void) QueryColorDatabase(BorderColor,&image->border_color,&image->exception);
  (void) QueryColorDatabase(MatteColor,&image->matte_color,&image->exception);
  (void) QueryColorDatabase(TransparentColor,&image->transparent_color,
    &image->exception);
  GetTimerInfo(&image->timer);
  image->ping=MagickFalse;
  image->cache=AcquirePixelCache(0);
  image->blob=CloneBlobInfo((BlobInfo *) NULL);
  image->timestamp=time((time_t *) NULL);
  image->debug=IsEventLogging();
  image->reference_count=1;
  image->semaphore=AllocateSemaphoreInfo();
  image->signature=MagickSignature;
  if (image_info == (ImageInfo *) NULL)
    return(image);
  /*
    Transfer image info.
  */
  SetBlobExempt(image,image_info->file != (FILE *) NULL ? MagickTrue :
    MagickFalse);
  (void) CopyMagickString(image->filename,image_info->filename,MaxTextExtent);
  (void) CopyMagickString(image->magick_filename,image_info->filename,
    MaxTextExtent);
  (void) CopyMagickString(image->magick,image_info->magick,MaxTextExtent);
  if (image_info->size != (char *) NULL)
    {
      (void) ParseAbsoluteGeometry(image_info->size,&image->extract_info);
      image->columns=image->extract_info.width;
      image->rows=image->extract_info.height;
      image->offset=image->extract_info.x;
      /* The x/y of a -size geometry are consumed as the offset above. */
      image->extract_info.x=0;
      image->extract_info.y=0;
    }
  if (image_info->extract != (char *) NULL)
    {
      RectangleInfo
        geometry;

      flags=ParseAbsoluteGeometry(image_info->extract,&geometry);
      if (((flags & XValue) != 0) || ((flags & YValue) != 0))
        {
          image->extract_info=geometry;
          Swap(image->columns,image->extract_info.width);
          Swap(image->rows,image->extract_info.height);
        }
    }
  image->compression=image_info->compression;
  image->quality=image_info->quality;
  image->endian=image_info->endian;
  image->interlace=image_info->interlace;
  image->units=image_info->units;
  if (image_info->density != (char *) NULL)
    {
      GeometryInfo
        geometry_info;

      flags=ParseGeometry(image_info->density,&geometry_info);
      image->x_resolution=geometry_info.rho;
      image->y_resolution=geometry_info.sigma;
      /* A single density value applies to both axes. */
      if ((flags & SigmaValue) == 0)
        image->y_resolution=image->x_resolution;
    }
  if (image_info->page != (char *) NULL)
    {
      char
        *geometry;

      image->page=image->extract_info;
      geometry=GetPageGeometry(image_info->page);
      (void) ParseAbsoluteGeometry(geometry,&image->page);
      geometry=DestroyString(geometry);
    }
  if (image_info->depth != 0)
    image->depth=image_info->depth;
  image->dither=image_info->dither;
  image->background_color=image_info->background_color;
  image->border_color=image_info->border_color;
  image->matte_color=image_info->matte_color;
  image->transparent_color=image_info->transparent_color;
  image->ping=image_info->ping;
  image->progress_monitor=image_info->progress_monitor;
  image->client_data=image_info->client_data;
  if (image_info->cache != (void *) NULL)
    ClonePixelCacheMethods(image->cache,image_info->cache);
  (void) SyncImageSettings(image_info,image);
  option=GetImageOption(image_info,"delay");
  if (option != (const char *) NULL)
    {
      GeometryInfo
        geometry_info;

      /*
        A '>' flag caps the delay at rho, a '<' flag raises it to at least
        rho, otherwise the delay is set to rho outright.
      */
      flags=ParseGeometry(option,&geometry_info);
      if ((flags & GreaterValue) != 0)
        {
          if (image->delay > (size_t) floor(geometry_info.rho+0.5))
            image->delay=(size_t) floor(geometry_info.rho+0.5);
        }
      else
        if ((flags & LessValue) != 0)
          {
            /*
              BUG FIX: this branch previously assigned
              image->ticks_per_second from sigma (a copy-paste of the
              SigmaValue handling below), so '<' never raised the delay.
            */
            if (image->delay < (size_t) floor(geometry_info.rho+0.5))
              image->delay=(size_t) floor(geometry_info.rho+0.5);
          }
        else
          image->delay=(size_t) floor(geometry_info.rho+0.5);
      if ((flags & SigmaValue) != 0)
        image->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5);
    }
  option=GetImageOption(image_info,"dispose");
  if (option != (const char *) NULL)
    image->dispose=(DisposeType) ParseCommandOption(MagickDisposeOptions,
      MagickFalse,option);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImageInfo() allocates the ImageInfo structure.
%
% The format of the AcquireImageInfo method is:
%
% ImageInfo *AcquireImageInfo(void)
%
*/
MagickExport ImageInfo *AcquireImageInfo(void)
{
  ImageInfo
    *info;

  /*
    Allocate an ImageInfo structure and reset it to default values; a
    fatal exception is thrown if the allocation fails.
  */
  info=(ImageInfo *) AcquireMagickMemory(sizeof(*info));
  if (info == (ImageInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  GetImageInfo(info);
  return(info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e N e x t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireNextImage() initializes the next image in a sequence to
% default values. The next member of image points to the newly allocated
% image. If there is a memory shortage, next is assigned NULL.
%
% The format of the AcquireNextImage method is:
%
% void AcquireNextImage(const ImageInfo *image_info,Image *image)
%
% A description of each parameter follows:
%
% o image_info: Many of the image default values are set from this
% structure. For example, filename, compression, depth, background color,
% and others.
%
% o image: the image.
%
*/
MagickExport void AcquireNextImage(const ImageInfo *image_info,Image *image)
{
/*
Allocate image structure.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
/* Allocate the successor and link it in; if allocation failed the list
   tail remains NULL and we return without touching anything else. */
image->next=AcquireImage(image_info);
if (GetNextImageInList(image) == (Image *) NULL)
return;
/* Seed the successor's filename from the current image, then prefer the
   caller-supplied image_info filename when one was given. */
(void) CopyMagickString(GetNextImageInList(image)->filename,image->filename,
MaxTextExtent);
if (image_info != (ImageInfo *) NULL)
(void) CopyMagickString(GetNextImageInList(image)->filename,
image_info->filename,MaxTextExtent);
/* Drop the blob AcquireImage() created and share this image's blob
   instead, so the whole sequence reads/writes one I/O stream. */
DestroyBlob(GetNextImageInList(image));
image->next->blob=ReferenceBlob(image->blob);
image->next->endian=image->endian;
image->next->scene=image->scene+1;
image->next->previous=image;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A p p e n d I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AppendImages() takes all images from the current image pointer to the end
% of the image list and appends them to each other top-to-bottom if the
% stack parameter is true, otherwise left-to-right.
%
% The current gravity setting now affects how the image is justified in the
% final image.
%
% The format of the AppendImages method is:
%
% Image *AppendImages(const Image *images,const MagickBooleanType stack,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o stack: A value other than 0 stacks the images top-to-bottom.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AppendImages(const Image *images,
const MagickBooleanType stack,ExceptionInfo *exception)
{
#define AppendImageTag "Append/Image"
CacheView
*append_view;
Image
*append_image;
MagickBooleanType
matte,
status;
MagickOffsetType
n;
RectangleInfo
geometry;
register const Image
*next;
size_t
height,
number_images,
width;
ssize_t
x_offset,
y,
y_offset;
/*
Compute maximum area of appended area.
*/
assert(images != (Image *) NULL);
assert(images->signature == MagickSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
matte=images->matte;
number_images=1;
width=images->columns;
height=images->rows;
next=GetNextImageInList(images);
/* Size the canvas: stacking sums heights and takes the widest column;
   side-by-side sums widths and takes the tallest row.  The result is
   matte if any input has a matte channel. */
for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
{
if (next->matte != MagickFalse)
matte=MagickTrue;
number_images++;
if (stack != MagickFalse)
{
if (next->columns > width)
width=next->columns;
height+=next->rows;
continue;
}
width+=next->columns;
if (next->rows > height)
height=next->rows;
}
/*
Append images.
*/
append_image=CloneImage(images,width,height,MagickTrue,exception);
if (append_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(append_image,DirectClass) == MagickFalse)
{
InheritException(exception,&append_image->exception);
append_image=DestroyImage(append_image);
return((Image *) NULL);
}
append_image->matte=matte;
(void) SetImageBackgroundColor(append_image);
status=MagickTrue;
x_offset=0;
y_offset=0;
next=images;
append_view=AcquireAuthenticCacheView(append_image,exception);
/* Copy each source image into the canvas at a running x/y offset,
   adjusted by the image's gravity setting. */
for (n=0; n < (MagickOffsetType) number_images; n++)
{
CacheView
*image_view;
Image
*image;
MagickBooleanType
proceed;
/* Work on a clone so the colorspace transform below does not mutate
   the caller's image list. */
image=CloneImage(next,0,0,MagickTrue,exception);
if (image == (Image *) NULL)
break;
status=TransformImageColorspace(image,append_image->colorspace);
if (status == MagickFalse)
break;
SetGeometry(append_image,&geometry);
GravityAdjustGeometry(image->columns,image->rows,image->gravity,&geometry);
if (stack != MagickFalse)
x_offset-=geometry.x;
else
y_offset-=geometry.y;
image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register const IndexPacket
*restrict indexes;
register const PixelPacket
*restrict p;
register IndexPacket
*restrict append_indexes;
register PixelPacket
*restrict q;
register ssize_t
x;
/* Rows cannot 'break' out of an OpenMP loop; a failed row flags
   status and the remaining rows fall through via 'continue'. */
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(append_view,x_offset,y+y_offset,
image->columns,1,exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
append_indexes=GetCacheViewAuthenticIndexQueue(append_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(q,GetPixelRed(p));
SetPixelGreen(q,GetPixelGreen(p));
SetPixelBlue(q,GetPixelBlue(p));
/* Opaque unless the source carries a matte channel. */
SetPixelOpacity(q,OpaqueOpacity);
if (image->matte != MagickFalse)
SetPixelOpacity(q,GetPixelOpacity(p));
if ((image->colorspace == CMYKColorspace) &&
(append_image->colorspace == CMYKColorspace))
SetPixelIndex(append_indexes+x,GetPixelIndex(indexes+x));
p++;
q++;
}
sync=SyncCacheViewAuthenticPixels(append_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
/* Advance the insertion point: rightwards when appending side-by-side,
   downwards when stacking. */
if (stack == MagickFalse)
{
x_offset+=(ssize_t) image->columns;
y_offset=0;
}
else
{
x_offset=0;
y_offset+=(ssize_t) image->rows;
}
image=DestroyImage(image);
proceed=SetImageProgress(append_image,AppendImageTag,n,number_images);
if (proceed == MagickFalse)
break;
next=GetNextImageInList(next);
}
append_view=DestroyCacheView(append_view);
if (status == MagickFalse)
append_image=DestroyImage(append_image);
return(append_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C a t c h I m a g e E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CatchImageException() returns if no exceptions are found in the image
% sequence, otherwise it determines the most severe exception and reports
% it as a warning or error depending on the severity.
%
% The format of the CatchImageException method is:
%
% ExceptionType CatchImageException(Image *image)
%
% A description of each parameter follows:
%
% o image: An image sequence.
%
*/
MagickExport ExceptionType CatchImageException(Image *image)
{
  ExceptionInfo
    *exception_info;

  ExceptionType
    most_severe;

  /*
    Gather the exceptions recorded on the image sequence into a scratch
    ExceptionInfo, report them, and hand back the worst severity seen.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  exception_info=AcquireExceptionInfo();
  GetImageException(image,exception_info);
  CatchException(exception_info);
  most_severe=exception_info->severity;
  exception_info=DestroyExceptionInfo(exception_info);
  return(most_severe);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l i p I m a g e P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClipImagePath() sets the image clip mask based on any clipping path
% information if it exists.
%
% The format of the ClipImagePath method is:
%
% MagickBooleanType ClipImagePath(Image *image,const char *pathname,
% const MagickBooleanType inside)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o pathname: name of clipping path resource. If name is preceded by #, use
% clipping path numbered by name.
%
% o inside: if non-zero, later operations take effect inside clipping path.
% Otherwise later operations take effect outside clipping path.
%
*/
MagickExport MagickBooleanType ClipImage(Image *image)
{
  const char
    *pathname;

  /*
    Convenience wrapper: clip to the first embedded clipping path, with
    later operations taking effect inside the path.
  */
  pathname="#1";
  return(ClipImagePath(image,pathname,MagickTrue));
}
MagickExport MagickBooleanType ClipImagePath(Image *image,const char *pathname,
const MagickBooleanType inside)
{
#define ClipImagePathTag "ClipPath/Image"
char
*property;
const char
*value;
Image
*clip_mask;
ImageInfo
*image_info;
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(pathname != NULL);
/* Look up the named 8BIM clipping-path property stored on the image. */
property=AcquireString(pathname);
(void) FormatLocaleString(property,MaxTextExtent,"8BIM:1999,2998:%s",
pathname);
value=GetImageProperty(image,property);
property=DestroyString(property);
if (value == (const char *) NULL)
{
ThrowFileException(&image->exception,OptionError,"NoClipPathDefined",
image->filename);
return(MagickFalse);
}
/* Render the clipping-path blob into a mask image.  The filename is
   only informational (image name + path name). */
image_info=AcquireImageInfo();
(void) CopyMagickString(image_info->filename,image->filename,MaxTextExtent);
(void) ConcatenateMagickString(image_info->filename,pathname,MaxTextExtent);
clip_mask=BlobToImage(image_info,value,strlen(value),&image->exception);
image_info=DestroyImageInfo(image_info);
if (clip_mask == (Image *) NULL)
return(MagickFalse);
if (clip_mask->storage_class == PseudoClass)
{
(void) SyncImage(clip_mask);
/* NOTE(review): clip_mask appears to leak on this early return —
   confirm against upstream before changing behavior. */
if (SetImageStorageClass(clip_mask,DirectClass) == MagickFalse)
return(MagickFalse);
}
/* Inverting the mask makes later operations apply outside the path. */
if (inside == MagickFalse)
(void) NegateImage(clip_mask,MagickFalse);
(void) FormatLocaleString(clip_mask->magick_filename,MaxTextExtent,
"8BIM:1999,2998:%s\nPS",pathname);
/* SetImageClipMask() clones the mask, so release our copy afterwards. */
(void) SetImageClipMask(image,clip_mask);
clip_mask=DestroyImage(clip_mask);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImage() copies an image and returns the copy as a new image object.
%
% If the specified columns and rows is 0, an exact copy of the image is
% returned, otherwise the pixel data is undefined and must be initialized
% with the QueueAuthenticPixels() and SyncAuthenticPixels() methods. On
% failure, a NULL image is returned and exception describes the reason for the
% failure.
%
% The format of the CloneImage method is:
%
% Image *CloneImage(const Image *image,const size_t columns,
% const size_t rows,const MagickBooleanType orphan,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the cloned image.
%
% o rows: the number of rows in the cloned image.
%
% o detach: With a value other than 0, the cloned image is detached from
% its parent I/O stream.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CloneImage(const Image *image,const size_t columns,
const size_t rows,const MagickBooleanType detach,ExceptionInfo *exception)
{
double
scale;
Image
*clone_image;
size_t
length;
/*
Clone the image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
clone_image=(Image *) AcquireMagickMemory(sizeof(*clone_image));
if (clone_image == (Image *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
(void) ResetMagickMemory(clone_image,0,sizeof(*clone_image));
clone_image->signature=MagickSignature;
clone_image->storage_class=image->storage_class;
clone_image->channels=image->channels;
clone_image->colorspace=image->colorspace;
clone_image->matte=image->matte;
clone_image->columns=image->columns;
clone_image->rows=image->rows;
clone_image->dither=image->dither;
if (image->colormap != (PixelPacket *) NULL)
{
/*
Allocate and copy the image colormap.
*/
clone_image->colors=image->colors;
length=(size_t) image->colors;
clone_image->colormap=(PixelPacket *) AcquireQuantumMemory(length,
sizeof(*clone_image->colormap));
if (clone_image->colormap == (PixelPacket *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
(void) CopyMagickMemory(clone_image->colormap,image->colormap,length*
sizeof(*clone_image->colormap));
}
/* Deep-copy the ancillary metadata attached to the image. */
(void) CloneImageProfiles(clone_image,image);
(void) CloneImageProperties(clone_image,image);
(void) CloneImageArtifacts(clone_image,image);
GetTimerInfo(&clone_image->timer);
GetExceptionInfo(&clone_image->exception);
InheritException(&clone_image->exception,&image->exception);
if (image->ascii85 != (void *) NULL)
Ascii85Initialize(clone_image);
clone_image->magick_columns=image->magick_columns;
clone_image->magick_rows=image->magick_rows;
clone_image->type=image->type;
(void) CopyMagickString(clone_image->magick_filename,image->magick_filename,
MaxTextExtent);
(void) CopyMagickString(clone_image->magick,image->magick,MaxTextExtent);
(void) CopyMagickString(clone_image->filename,image->filename,MaxTextExtent);
clone_image->progress_monitor=image->progress_monitor;
clone_image->client_data=image->client_data;
clone_image->reference_count=1;
clone_image->next=image->next;
clone_image->previous=image->previous;
clone_image->list=NewImageList();
clone_image->clip_mask=NewImageList();
clone_image->mask=NewImageList();
/* Detaching severs the clone from the parent's I/O stream and from its
   position in the image list. */
if (detach == MagickFalse)
clone_image->blob=ReferenceBlob(image->blob);
else
{
clone_image->next=NewImageList();
clone_image->previous=NewImageList();
clone_image->blob=CloneBlobInfo((BlobInfo *) NULL);
}
clone_image->ping=image->ping;
clone_image->debug=IsEventLogging();
clone_image->semaphore=AllocateSemaphoreInfo();
/* columns == rows == 0 requests an exact copy: share the pixel cache by
   reference and copy montage/directory strings too. */
if ((columns == 0) && (rows == 0))
{
if (image->montage != (char *) NULL)
(void) CloneString(&clone_image->montage,image->montage);
if (image->directory != (char *) NULL)
(void) CloneString(&clone_image->directory,image->directory);
if (image->clip_mask != (Image *) NULL)
clone_image->clip_mask=CloneImage(image->clip_mask,0,0,MagickTrue,
exception);
if (image->mask != (Image *) NULL)
clone_image->mask=CloneImage(image->mask,0,0,MagickTrue,exception);
clone_image->cache=ReferencePixelCache(image->cache);
return(clone_image);
}
/* Same geometry requested explicitly: masks still apply, but the pixel
   cache is cloned (below) rather than shared. */
if ((columns == image->columns) && (rows == image->rows))
{
if (image->clip_mask != (Image *) NULL)
clone_image->clip_mask=CloneImage(image->clip_mask,0,0,MagickTrue,
exception);
if (image->mask != (Image *) NULL)
clone_image->mask=CloneImage(image->mask,0,0,MagickTrue,exception);
}
/* Scale page geometry and tile offset to the requested dimensions; the
   pixel data itself is left undefined for the caller to initialize. */
scale=(double) columns/(double) image->columns;
clone_image->page.width=(size_t) floor(scale*image->page.width+0.5);
clone_image->page.x=(ssize_t) ceil(scale*image->page.x-0.5);
clone_image->tile_offset.x=(ssize_t) ceil(scale*image->tile_offset.x-0.5);
scale=(double) rows/(double) image->rows;
clone_image->page.height=(size_t) floor(scale*image->page.height+0.5);
clone_image->page.y=(ssize_t) ceil(scale*image->page.y-0.5);
clone_image->tile_offset.y=(ssize_t) ceil(scale*image->tile_offset.y-0.5);
clone_image->columns=columns;
clone_image->rows=rows;
clone_image->cache=ClonePixelCache(image->cache);
return(clone_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImageInfo() makes a copy of the given image info structure. If
% NULL is specified, a new image info structure is created initialized to
% default values.
%
% The format of the CloneImageInfo method is:
%
% ImageInfo *CloneImageInfo(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport ImageInfo *CloneImageInfo(const ImageInfo *image_info)
{
ImageInfo
*clone_info;
/* Start from a default-initialized ImageInfo; a NULL source means the
   caller just wants the defaults. */
clone_info=AcquireImageInfo();
if (image_info == (ImageInfo *) NULL)
return(clone_info);
clone_info->compression=image_info->compression;
clone_info->temporary=image_info->temporary;
clone_info->adjoin=image_info->adjoin;
clone_info->antialias=image_info->antialias;
clone_info->scene=image_info->scene;
clone_info->number_scenes=image_info->number_scenes;
clone_info->depth=image_info->depth;
/* String members are deep-copied via CloneString(). */
(void) CloneString(&clone_info->size,image_info->size);
(void) CloneString(&clone_info->extract,image_info->extract);
(void) CloneString(&clone_info->scenes,image_info->scenes);
(void) CloneString(&clone_info->page,image_info->page);
clone_info->interlace=image_info->interlace;
clone_info->endian=image_info->endian;
clone_info->units=image_info->units;
clone_info->quality=image_info->quality;
(void) CloneString(&clone_info->sampling_factor,image_info->sampling_factor);
(void) CloneString(&clone_info->server_name,image_info->server_name);
(void) CloneString(&clone_info->font,image_info->font);
(void) CloneString(&clone_info->texture,image_info->texture);
(void) CloneString(&clone_info->density,image_info->density);
clone_info->pointsize=image_info->pointsize;
clone_info->fuzz=image_info->fuzz;
clone_info->pen=image_info->pen;
clone_info->background_color=image_info->background_color;
clone_info->border_color=image_info->border_color;
clone_info->matte_color=image_info->matte_color;
clone_info->transparent_color=image_info->transparent_color;
clone_info->dither=image_info->dither;
clone_info->monochrome=image_info->monochrome;
clone_info->colors=image_info->colors;
clone_info->colorspace=image_info->colorspace;
clone_info->type=image_info->type;
clone_info->orientation=image_info->orientation;
clone_info->preview_type=image_info->preview_type;
clone_info->group=image_info->group;
clone_info->ping=image_info->ping;
clone_info->verbose=image_info->verbose;
(void) CloneString(&clone_info->view,image_info->view);
(void) CloneString(&clone_info->authenticate,image_info->authenticate);
(void) CloneImageOptions(clone_info,image_info);
clone_info->progress_monitor=image_info->progress_monitor;
clone_info->client_data=image_info->client_data;
/* The pixel cache is shared by reference, not copied. */
clone_info->cache=image_info->cache;
if (image_info->cache != (void *) NULL)
clone_info->cache=ReferencePixelCache(image_info->cache);
if (image_info->profile != (void *) NULL)
clone_info->profile=(void *) CloneStringInfo((StringInfo *)
image_info->profile);
SetImageInfoFile(clone_info,image_info->file);
SetImageInfoBlob(clone_info,image_info->blob,image_info->length);
clone_info->stream=image_info->stream;
clone_info->virtual_pixel_method=image_info->virtual_pixel_method;
(void) CopyMagickString(clone_info->magick,image_info->magick,MaxTextExtent);
(void) CopyMagickString(clone_info->unique,image_info->unique,MaxTextExtent);
(void) CopyMagickString(clone_info->zero,image_info->zero,MaxTextExtent);
(void) CopyMagickString(clone_info->filename,image_info->filename,
MaxTextExtent);
clone_info->subimage=image_info->scene; /* deprecated */
clone_info->subrange=image_info->number_scenes; /* deprecated */
clone_info->channel=image_info->channel;
clone_info->debug=IsEventLogging();
clone_info->signature=image_info->signature;
return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImage() dereferences an image, deallocating memory associated with
% the image if the reference count becomes zero.
%
% The format of the DestroyImage method is:
%
% Image *DestroyImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *DestroyImage(Image *image)
{
MagickBooleanType
destroy;
/*
Dereference image.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
destroy=MagickFalse;
/* Decrement the reference count under the image's semaphore; only the
   holder of the last reference performs the actual teardown. */
LockSemaphoreInfo(image->semaphore);
image->reference_count--;
if (image->reference_count == 0)
destroy=MagickTrue;
UnlockSemaphoreInfo(image->semaphore);
if (destroy == MagickFalse)
return((Image *) NULL);
/*
Destroy image.
*/
DestroyImagePixels(image);
if (image->clip_mask != (Image *) NULL)
image->clip_mask=DestroyImage(image->clip_mask);
if (image->mask != (Image *) NULL)
image->mask=DestroyImage(image->mask);
if (image->montage != (char *) NULL)
image->montage=DestroyString(image->montage);
if (image->directory != (char *) NULL)
image->directory=DestroyString(image->directory);
if (image->colormap != (PixelPacket *) NULL)
image->colormap=(PixelPacket *) RelinquishMagickMemory(image->colormap);
if (image->geometry != (char *) NULL)
image->geometry=DestroyString(image->geometry);
DestroyImageProfiles(image);
DestroyImageProperties(image);
DestroyImageArtifacts(image);
if (image->ascii85 != (Ascii85Info*) NULL)
image->ascii85=(Ascii85Info *) RelinquishMagickMemory(image->ascii85);
DestroyBlob(image);
(void) DestroyExceptionInfo(&image->exception);
/* The semaphore is destroyed last since the lock above depends on it. */
if (image->semaphore != (SemaphoreInfo *) NULL)
DestroySemaphoreInfo(&image->semaphore);
/* Invalidate the signature so stale pointers trip the asserts above. */
image->signature=(~MagickSignature);
image=(Image *) RelinquishMagickMemory(image);
return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImageInfo() deallocates memory associated with an ImageInfo
% structure.
%
% The format of the DestroyImageInfo method is:
%
% ImageInfo *DestroyImageInfo(ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport ImageInfo *DestroyImageInfo(ImageInfo *image_info)
{
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
/* Release every owned string member; each pointer is reset to NULL by
   DestroyString() so double-destroys are caught. */
if (image_info->size != (char *) NULL)
image_info->size=DestroyString(image_info->size);
if (image_info->extract != (char *) NULL)
image_info->extract=DestroyString(image_info->extract);
if (image_info->scenes != (char *) NULL)
image_info->scenes=DestroyString(image_info->scenes);
if (image_info->page != (char *) NULL)
image_info->page=DestroyString(image_info->page);
if (image_info->sampling_factor != (char *) NULL)
image_info->sampling_factor=DestroyString(
image_info->sampling_factor);
if (image_info->server_name != (char *) NULL)
image_info->server_name=DestroyString(
image_info->server_name);
if (image_info->font != (char *) NULL)
image_info->font=DestroyString(image_info->font);
if (image_info->texture != (char *) NULL)
image_info->texture=DestroyString(image_info->texture);
if (image_info->density != (char *) NULL)
image_info->density=DestroyString(image_info->density);
if (image_info->view != (char *) NULL)
image_info->view=DestroyString(image_info->view);
if (image_info->authenticate != (char *) NULL)
image_info->authenticate=DestroyString(
image_info->authenticate);
/* Then the non-string resources: options map, pixel cache, profile. */
DestroyImageOptions(image_info);
if (image_info->cache != (void *) NULL)
image_info->cache=DestroyPixelCache(image_info->cache);
if (image_info->profile != (StringInfo *) NULL)
image_info->profile=(void *) DestroyStringInfo((StringInfo *)
image_info->profile);
/* Invalidate the signature before freeing so stale pointers trip the
   asserts at the top of this function. */
image_info->signature=(~MagickSignature);
image_info=(ImageInfo *) RelinquishMagickMemory(image_info);
return(image_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i s a s s o c i a t e I m a g e S t r e a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DisassociateImageStream() disassociates the image stream.
%
% The format of the DisassociateImageStream method is:
%
%      void DisassociateImageStream(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DisassociateImageStream(Image *image)
{
  /*
    Detach any user-supplied stream handler from the image's blob so
    subsequent I/O no longer routes through it.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  (void) DetachBlob(image->blob);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C l i p M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageClipMask() returns the clip path associated with the image.
%
% The format of the GetImageClipMask method is:
%
% Image *GetImageClipMask(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *GetImageClipMask(const Image *image,
  ExceptionInfo *exception)
{
  /*
    Hand back a private copy of the image's clip path, or NULL when no clip
    path is attached.  The caller owns (and must eventually destroy) the
    returned clone.
  */
  assert(image != (const Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  if (image->clip_mask != (Image *) NULL)
    return(CloneImage(image->clip_mask,0,0,MagickTrue,exception));
  return((Image *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageException() traverses an image sequence and returns any
% error more severe than noted by the exception parameter.
%
% The format of the GetImageException method is:
%
% void GetImageException(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: Specifies a pointer to a list of one or more images.
%
% o exception: return the highest severity exception.
%
*/
MagickExport void GetImageException(Image *image,ExceptionInfo *exception)
{
  register Image
    *p;

  /*
    Scan the image sequence: propagate the most severe per-image exception
    into `exception', clearing each image's own exception record as we go.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  for (p=image; p != (Image *) NULL; p=GetNextImageInList(p))
  {
    if (p->exception.severity != UndefinedException)
      {
        if (p->exception.severity > exception->severity)
          InheritException(exception,&p->exception);
        p->exception.severity=UndefinedException;
      }
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageInfo() initializes image_info to default values.
%
% The format of the GetImageInfo method is:
%
% void GetImageInfo(ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport void GetImageInfo(ImageInfo *image_info)
{
  char
    *synchronize;

  ExceptionInfo
    *exception;

  /*
    File and image dimension members.

    Validate the pointer before anything else -- every other method in this
    module asserts first and logs second; the original logged before the
    NULL assert.
  */
  assert(image_info != (ImageInfo *) NULL);
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  (void) ResetMagickMemory(image_info,0,sizeof(*image_info));
  image_info->adjoin=MagickTrue;
  image_info->interlace=NoInterlace;
  image_info->channel=DefaultChannels;
  image_info->quality=UndefinedCompressionQuality;
  image_info->antialias=MagickTrue;
  image_info->dither=MagickTrue;
  /* MAGICK_SYNCHRONIZE environment variable opts into synchronous writes */
  synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (synchronize != (const char *) NULL)
    {
      image_info->synchronize=IsStringTrue(synchronize);
      synchronize=DestroyString(synchronize);
    }
  /* default colors; a scratch exception absorbs any lookup warnings */
  exception=AcquireExceptionInfo();
  (void) QueryColorDatabase(BackgroundColor,&image_info->background_color,
    exception);
  (void) QueryColorDatabase(BorderColor,&image_info->border_color,exception);
  (void) QueryColorDatabase(MatteColor,&image_info->matte_color,exception);
  (void) QueryColorDatabase(TransparentColor,&image_info->transparent_color,
    exception);
  exception=DestroyExceptionInfo(exception);
  image_info->debug=IsEventLogging();
  image_info->signature=MagickSignature;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e I n f o F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageInfoFile() returns the image info file member.
%
% The format of the GetImageInfoFile method is:
%
% FILE *GetImageInfoFile(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport FILE *GetImageInfoFile(const ImageInfo *image_info)
{
  /*
    Accessor for the open FILE member.  Validate the handle first, for
    consistency with the other accessors in this module (the original
    dereferenced the pointer unchecked).
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickSignature);
  return(image_info->file);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageMask() returns the mask associated with the image.
%
% The format of the GetImageMask method is:
%
% Image *GetImageMask(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *GetImageMask(const Image *image,ExceptionInfo *exception)
{
  /*
    Return a caller-owned clone of the image's mask, or NULL when the image
    carries no mask.
  */
  assert(image != (const Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  if (image->mask != (Image *) NULL)
    return(CloneImage(image->mask,0,0,MagickTrue,exception));
  return((Image *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannels() returns the number of pixel channels associated with the
% specified image.
%
%  The format of the GetImageChannels method is:
%
% size_t GetImageChannels(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport size_t GetImageChannels(Image *image)
{
  /*
    Accessor: the number of pixel channels recorded for the image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  return(image->channels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e R e f e r e n c e C o u n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageReferenceCount() returns the image reference count.
%
%  The format of the GetImageReferenceCount method is:
%
% ssize_t GetImageReferenceCount(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport ssize_t GetImageReferenceCount(Image *image)
{
  ssize_t
    reference_count;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Read under the image semaphore so the snapshot is coherent with
    concurrent reference/destroy operations on the same image.
  */
  LockSemaphoreInfo(image->semaphore);
  reference_count=image->reference_count;
  UnlockSemaphoreInfo(image->semaphore);
  return(reference_count);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i r t u a l P i x e l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageVirtualPixelMethod() gets the "virtual pixels" method for the
% image. A virtual pixel is any pixel access that is outside the boundaries
% of the image cache.
%
% The format of the GetImageVirtualPixelMethod() method is:
%
% VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
{
  /*
    Accessor: the policy applied to pixel requests outside the image
    boundaries; the setting itself lives in the pixel cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  return(GetPixelCacheVirtualMethod(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e r p r e t I m a g e F i l e n a m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InterpretImageFilename() interprets embedded characters in an image filename.
% The filename length is returned.
%
% The format of the InterpretImageFilename method is:
%
% size_t InterpretImageFilename(const ImageInfo *image_info,Image *image,
% const char *format,int value,char *filename)
%
% A description of each parameter follows.
%
%    o image_info: the image info.
%
% o image: the image.
%
% o format: A filename describing the format to use to write the numeric
% argument. Only the first numeric format identifier is replaced.
%
% o value: Numeric value to substitute into format filename.
%
% o filename: return the formatted filename in this character buffer.
%
*/
MagickExport size_t InterpretImageFilename(const ImageInfo *image_info,
  Image *image,const char *format,int value,char *filename)
{
  char
    *q;

  int
    c;

  MagickBooleanType
    canonical;

  register const char
    *p;

  size_t
    length;

  /*
    Substitute %d/%o/%x numeric specifiers and %[filename:...] property
    specifiers embedded in `format' into `filename'; a literal "%%"
    collapses to "%".  If no specifier matched, `format' is copied
    verbatim.  Returns the length of the resulting filename.
  */
  canonical=MagickFalse;
  length=0;
  (void) CopyMagickString(filename,format,MaxTextExtent);
  for (p=strchr(format,'%'); p != (char *) NULL; p=strchr(p+1,'%'))
  {
    q=(char *) p+1;
    if (*q == '%')
      {
        /* escaped percent; handled by the collapse pass below */
        p=q+1;
        continue;
      }
    if (*q == '0')
      {
        ssize_t
          value;

        /* skip a zero-padded field width (e.g. %03d); result unused */
        value=(ssize_t) strtol(q,&q,10);
        (void) value;
      }
    switch (*q)
    {
      case 'd':
      case 'o':
      case 'x':
      {
        /*
          Numeric scene specifier: temporarily terminate the string just
          past the specifier, format prefix+specifier with `value', then
          restore the byte and append the untouched remainder.
        */
        q++;
        c=(*q);
        *q='\0';
        (void) FormatLocaleString(filename+(p-format),(size_t) (MaxTextExtent-
          (p-format)),p,value);
        *q=c;
        (void) ConcatenateMagickString(filename,q,MaxTextExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      case '[':
      {
        char
          pattern[MaxTextExtent];

        const char
          *value;

        register char
          *r;

        register ssize_t
          i;

        ssize_t
          depth;

        /*
          Image option.
        */
        if (strchr(p,']') == (char *) NULL)
          break;
        depth=1;
        r=q+1;
        /* copy the bracketed pattern, honoring nested [...] pairs */
        for (i=0; (i < (MaxTextExtent-1L)) && (*r != '\0'); i++)
        {
          if (*r == '[')
            depth++;
          if (*r == ']')
            depth--;
          if (depth <= 0)
            break;
          pattern[i]=(*r++);
        }
        pattern[i]='\0';
        /* only "filename:" properties participate in filename substitution */
        if (LocaleNCompare(pattern,"filename:",9) != 0)
          break;
        value=(const char *) NULL;
#if 0
  // FUTURE: remove this code. -- Anthony 29 April 2012
  // Removed as GetMagickProperty() will never match a "filename:"
  // string as this is not a 'known' image property.
  //
        if ((image_info != (const ImageInfo *) NULL) &&
            (image != (const Image *) NULL))
          value=GetMagickProperty(image_info,image,pattern);
        else
#endif
        /* lookup order: image properties, image artifacts, global options */
        if (image != (Image *) NULL)
          value=GetImageProperty(image,pattern);
        if ((value == (const char *) NULL) &&
            (image != (Image *) NULL))
          value=GetImageArtifact(image,pattern);
        if ((value == (const char *) NULL) &&
            (image_info != (ImageInfo *) NULL))
          value=GetImageOption(image_info,pattern);
        if (value == (const char *) NULL)
          break;
        q--;
        c=(*q);
        *q='\0';
        /*
          Splice the property value over the %[...] specifier; `length'
          accumulates the size shift of earlier substitutions.
        */
        (void) CopyMagickString(filename+(p-format-length),value,(size_t)
          (MaxTextExtent-(p-format-length)));
        length+=strlen(pattern)-1;
        *q=c;
        (void) ConcatenateMagickString(filename,r+1,MaxTextExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      default:
        break;
    }
  }
  /* collapse literal "%%" escapes to a single percent */
  for (q=filename; *q != '\0'; q++)
    if ((*q == '%') && (*(q+1) == '%'))
      {
        (void) CopyMagickString(q,q+1,(size_t) (MaxTextExtent-(q-filename)));
        canonical=MagickTrue;
      }
  if (canonical == MagickFalse)
    (void) CopyMagickString(filename,format,MaxTextExtent);
  return(strlen(filename));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s H i g h D y n a m i c R a n g e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsHighDynamicRangeImage() returns MagickTrue if any pixel component is
%  non-integer or exceeds the bounds of the quantum depth (e.g. for Q16,
%  0..65535).
%
% The format of the IsHighDynamicRangeImage method is:
%
% MagickBooleanType IsHighDynamicRangeImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsHighDynamicRangeImage(const Image *image,
  ExceptionInfo *exception)
{
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  /* without HDRI support pixel components are always clamped integers */
  (void) image;
  (void) exception;
  return(MagickFalse);
#else
  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickPixelPacket
    zero;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    status stays MagickTrue while every inspected component is an integral
    value within [0, QuantumRange]; a row breaks out early (leaving
    x < columns) as soon as one component fails the test.
  */
  status=MagickTrue;
  GetMagickPixelPacket(image,&zero);
  image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    register const IndexPacket
      *indexes;

    register const PixelPacket
      *p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,p,indexes+x,&pixel);
      /* (QuantumAny) cast detects a fractional component */
      if ((pixel.red < 0.0) || (pixel.red > QuantumRange) ||
          (pixel.red != (QuantumAny) pixel.red))
        break;
      if ((pixel.green < 0.0) || (pixel.green > QuantumRange) ||
          (pixel.green != (QuantumAny) pixel.green))
        break;
      if ((pixel.blue < 0.0) || (pixel.blue > QuantumRange) ||
          (pixel.blue != (QuantumAny) pixel.blue))
        break;
      if (pixel.matte != MagickFalse)
        {
          if ((pixel.opacity < 0.0) || (pixel.opacity > QuantumRange) ||
              (pixel.opacity != (QuantumAny) pixel.opacity))
            break;
        }
      if (pixel.colorspace == CMYKColorspace)
        {
          if ((pixel.index < 0.0) || (pixel.index > QuantumRange) ||
              (pixel.index != (QuantumAny) pixel.index))
            break;
        }
      p++;
    }
    if (x < (ssize_t) image->columns)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  /* all-in-range (status true) means the image is NOT high dynamic range */
  return(status != MagickFalse ? MagickFalse : MagickTrue);
#endif
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e O b j e c t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImageObject() returns MagickTrue if the image sequence contains a valid
% set of image objects.
%
% The format of the IsImageObject method is:
%
% MagickBooleanType IsImageObject(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsImageObject(const Image *image)
{
  register const Image
    *q;

  /*
    Walk the sequence: every image must carry a valid signature for the
    list to be considered a valid set of image objects.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  for (q=image; q != (const Image *) NULL; q=GetNextImageInList(q))
  {
    if (q->signature != MagickSignature)
      return(MagickFalse);
  }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s T a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsTaintImage() returns MagickTrue if any pixel in the image has been altered
% since it was first constituted.
%
% The format of the IsTaintImage method is:
%
% MagickBooleanType IsTaintImage(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsTaintImage(const Image *image)
{
  char
    magick[MaxTextExtent],
    filename[MaxTextExtent];

  register const Image
    *q;

  /*
    The sequence is tainted when any frame is flagged as altered, or when a
    frame's magick/filename no longer match those of the first frame.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  (void) CopyMagickString(magick,image->magick,MaxTextExtent);
  (void) CopyMagickString(filename,image->filename,MaxTextExtent);
  for (q=image; q != (const Image *) NULL; q=GetNextImageInList(q))
  {
    if ((q->taint != MagickFalse) ||
        (LocaleCompare(q->magick,magick) != 0) ||
        (LocaleCompare(q->filename,filename) != 0))
      return(MagickTrue);
  }
  return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o d i f y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ModifyImage() ensures that there is only a single reference to the image
% to be modified, updating the provided image pointer to point to a clone of
% the original image if necessary.
%
% The format of the ModifyImage method is:
%
% MagickBooleanType ModifyImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ModifyImage(Image **image,
  ExceptionInfo *exception)
{
  Image
    *clone_image;

  /*
    Copy-on-write: if the image is shared (reference count > 1), replace
    *image with a private clone and drop one reference from the original.
    If cloning fails, leave *image and its reference count untouched and
    report failure -- the original code decremented the count and stored a
    NULL image pointer, leaving the caller with nothing to use or destroy.
  */
  assert(image != (Image **) NULL);
  assert(*image != (Image *) NULL);
  assert((*image)->signature == MagickSignature);
  if ((*image)->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
  if (GetImageReferenceCount(*image) <= 1)
    return(MagickTrue);
  clone_image=CloneImage(*image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return(MagickFalse);
  LockSemaphoreInfo((*image)->semaphore);
  (*image)->reference_count--;
  UnlockSemaphoreInfo((*image)->semaphore);
  *image=clone_image;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w M a g i c k I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewMagickImage() creates a blank image canvas of the specified size and
% background color.
%
% The format of the NewMagickImage method is:
%
% Image *NewMagickImage(const ImageInfo *image_info,
% const size_t width,const size_t height,
% const MagickPixelPacket *background)
%
% A description of each parameter follows:
%
%    o image_info: the image info.
%
% o width: the image width.
%
% o height: the image height.
%
% o background: the image color.
%
*/
MagickExport Image *NewMagickImage(const ImageInfo *image_info,
  const size_t width,const size_t height,
  const MagickPixelPacket *background)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  Image
    *image;

  ssize_t
    y;

  MagickBooleanType
    status;

  /*
    Allocate a width x height canvas whose colorspace/matte/fuzz/depth are
    inherited from `background', then flood every pixel with that color.
    On any pixel-cache failure the partially built image is destroyed and
    NULL is returned.
  */
  assert(image_info != (const ImageInfo *) NULL);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image_info->signature == MagickSignature);
  assert(background != (const MagickPixelPacket *) NULL);
  image=AcquireImage(image_info);
  image->columns=width;
  image->rows=height;
  image->colorspace=background->colorspace;
  image->matte=background->matte;
  image->fuzz=background->fuzz;
  image->depth=background->depth;
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelPacket(image,background,q,indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e f e r e n c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReferenceImage() increments the reference count associated with an image
% returning a pointer to the image.
%
% The format of the ReferenceImage method is:
%
% Image *ReferenceImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *ReferenceImage(Image *image)
{
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  /*
    Increment the reference count under the image semaphore so the update
    is atomic with respect to concurrent reference/destroy operations.
  */
  LockSemaphoreInfo(image->semaphore);
  image->reference_count++;
  UnlockSemaphoreInfo(image->semaphore);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s e t I m a g e P a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetImagePage() resets the image page canvas and position.
%
% The format of the ResetImagePage method is:
%
% MagickBooleanType ResetImagePage(Image *image,const char *page)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o page: the relative page specification.
%
*/
MagickExport MagickBooleanType ResetImagePage(Image *image,const char *page)
{
  MagickStatusType
    flags;

  RectangleInfo
    geometry;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Parse the page specification; `flags' records which components of the
    geometry were actually present in the string.
  */
  flags=ParseAbsoluteGeometry(page,&geometry);
  if ((flags & WidthValue) != 0)
    {
      /* width alone implies a square canvas */
      if ((flags & HeightValue) == 0)
        geometry.height=geometry.width;
      image->page.width=geometry.width;
      image->page.height=geometry.height;
    }
  if ((flags & AspectValue) != 0)
    {
      /* '!' form: offsets are relative adjustments to the current page */
      if ((flags & XValue) != 0)
        image->page.x+=geometry.x;
      if ((flags & YValue) != 0)
        image->page.y+=geometry.y;
    }
  else
    {
      /*
        Absolute offsets; if no canvas size was given, grow the page so a
        positive offset still fits the image.
      */
      if ((flags & XValue) != 0)
        {
          image->page.x=geometry.x;
          if ((image->page.width == 0) && (geometry.x > 0))
            image->page.width=image->columns+geometry.x;
        }
      if ((flags & YValue) != 0)
        {
          image->page.y=geometry.y;
          if ((image->page.height == 0) && (geometry.y > 0))
            image->page.height=image->rows+geometry.y;
        }
    }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e B a c k g r o u n d C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageBackgroundColor() initializes the image pixels to the image
% background color. The background color is defined by the background_color
% member of the image structure.
%
%  The format of the SetImageBackgroundColor method is:
%
% MagickBooleanType SetImageBackgroundColor(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType SetImageBackgroundColor(Image *image)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  IndexPacket
    index;

  MagickBooleanType
    status;

  MagickPixelPacket
    background;

  PixelPacket
    pixel;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /* a non-gray background in a gray colorspace forces an RGB transform */
  if ((IsPixelGray(&image->background_color) == MagickFalse) &&
      (IsGrayColorspace(image->colorspace) != MagickFalse))
    (void) TransformImageColorspace(image,RGBColorspace);
  /* a translucent background requires an alpha channel */
  if ((image->background_color.opacity != OpaqueOpacity) &&
      (image->matte == MagickFalse))
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  /* pre-compute the packed pixel (and CMYK index) once, outside the loop */
  GetMagickPixelPacket(image,&background);
  SetMagickPixelPacket(image,&image->background_color,(const IndexPacket *)
    NULL,&background);
  if (image->colorspace == CMYKColorspace)
    ConvertRGBToCMYK(&background);
  index=0;
  SetPixelPacket(image,&background,&pixel,&index);
  /*
    Set image background color.
  */
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
      *q++=pixel;
    if (image->colorspace == CMYKColorspace)
      {
        register IndexPacket
          *restrict indexes;

        indexes=GetCacheViewAuthenticIndexQueue(image_view);
        for (x=0; x < (ssize_t) image->columns; x++)
          SetPixelIndex(indexes+x,index);
      }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C h a n n e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageChannels() sets the number of pixels channels associated with the
% image.
%
% The format of the SetImageChannels method is:
%
% MagickBooleanType SetImageChannels(Image *image,const size_t channels)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channels: The number of pixel channels.
%
*/
MagickExport MagickBooleanType SetImageChannels(Image *image,
  const size_t channels)
{
  /*
    Record the number of pixel channels.  Validate the image first, for
    consistency with the other setters in this module (the original
    dereferenced the pointer unchecked).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  image->channels=channels;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColor() set the entire image canvas to the specified color.
%
% The format of the SetImageColor method is:
%
% MagickBooleanType SetImageColor(Image *image,
% const MagickPixelPacket *color)
%
% A description of each parameter follows:
%
% o image: the image.
%
%    o color: the image color.
%
*/
MagickExport MagickBooleanType SetImageColor(Image *image,
  const MagickPixelPacket *color)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  assert(color != (const MagickPixelPacket *) NULL);
  /* the canvas inherits colorspace/matte/fuzz/depth from the color */
  image->colorspace=color->colorspace;
  image->matte=color->matte;
  image->fuzz=color->fuzz;
  image->depth=color->depth;
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelPacket(image,color,q,indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e S t o r a g e C l a s s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageStorageClass() sets the image class: DirectClass for true color
% images or PseudoClass for colormapped images.
%
% The format of the SetImageStorageClass method is:
%
% MagickBooleanType SetImageStorageClass(Image *image,
% const ClassType storage_class)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o storage_class: The image class.
%
*/
MagickExport MagickBooleanType SetImageStorageClass(Image *image,
  const ClassType storage_class)
{
  /*
    Record the storage class and re-synchronize the pixel cache so its
    morphology matches the new class.  Validate the image first, for
    consistency with the other setters in this module (the original
    dereferenced the pointer unchecked).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  image->storage_class=storage_class;
  return(SyncImagePixelCache(image,&image->exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C l i p M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageClipMask() associates a clip path with the image. The clip path
% must be the same dimensions as the image. Set any pixel component of
% the clip path to TransparentOpacity to prevent that corresponding image
% pixel component from being updated when SyncAuthenticPixels() is applied.
%
% The format of the SetImageClipMask method is:
%
% MagickBooleanType SetImageClipMask(Image *image,const Image *clip_mask)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clip_mask: the image clip path.
%
*/
MagickExport MagickBooleanType SetImageClipMask(Image *image,
  const Image *clip_mask)
{
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  /* the clip path must match the image geometry exactly */
  if (clip_mask != (const Image *) NULL)
    if ((clip_mask->columns != image->columns) ||
        (clip_mask->rows != image->rows))
      ThrowBinaryException(ImageError,"ImageSizeDiffers",image->filename);
  /* drop any existing clip path before installing (or clearing) a new one */
  if (image->clip_mask != (Image *) NULL)
    image->clip_mask=DestroyImage(image->clip_mask);
  image->clip_mask=NewImageList();
  /* a NULL clip_mask simply removes the clip path */
  if (clip_mask == (Image *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /* the image keeps a private clone; the caller retains clip_mask */
  image->clip_mask=CloneImage(clip_mask,0,0,MagickTrue,&image->exception);
  if (image->clip_mask == (Image *) NULL)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageExtent() sets the image size (i.e. columns & rows).
%
% The format of the SetImageExtent method is:
%
% MagickBooleanType SetImageExtent(Image *image,
% const size_t columns,const size_t rows)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: The image width in pixels.
%
% o rows: The image height in pixels.
%
*/
MagickExport MagickBooleanType SetImageExtent(Image *image,
  const size_t columns,const size_t rows)
{
  /*
    Resize the canvas geometry; a zero dimension is rejected.  The pixel
    cache is re-synchronized to match the new extent.  Validate the image
    first, for consistency with the other setters in this module.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if ((columns == 0) || (rows == 0))
    return(MagickFalse);
  image->columns=columns;
  image->rows=rows;
  return(SyncImagePixelCache(image,&image->exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfo() initializes the `magick' field of the ImageInfo structure.
% It is set to a type of image format based on the prefix or suffix of the
% filename. For example, `ps:image' returns PS indicating a Postscript image.
% JPEG is returned for this filename: `image.jpg'. The filename prefix has
%  precedence over the suffix.  Use an optional index enclosed in brackets
% after a file name to specify a desired scene of a multi-resolution image
% format like Photo CD (e.g. img0001.pcd[4]). A True (non-zero) return value
% indicates success.
%
% The format of the SetImageInfo method is:
%
% MagickBooleanType SetImageInfo(ImageInfo *image_info,
% const unsigned int frames,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o frames: the number of images you intend to write.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageInfo(ImageInfo *image_info,
  const unsigned int frames,ExceptionInfo *exception)
{
  char
    extension[MaxTextExtent],
    filename[MaxTextExtent],
    magic[MaxTextExtent],
    *q,
    subimage[MaxTextExtent];
  const MagicInfo
    *magic_info;
  const MagickInfo
    *magick_info;
  ExceptionInfo
    *sans_exception;
  Image
    *image;
  MagickBooleanType
    status;
  register const char
    *p;
  ssize_t
    count;
  unsigned char
    magick[2*MaxTextExtent];
  /*
    Look for 'image.format' in filename.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  *subimage='\0';
  if (frames == 0)
    {
      /*
        Reading: a bracketed suffix may select sub-images or a crop geometry.
      */
      GetPathComponent(image_info->filename,SubimagePath,subimage);
      if (*subimage != '\0')
        {
          /*
            Look for scene specification (e.g. img0001.pcd[4]).
          */
          if (IsSceneGeometry(subimage,MagickFalse) == MagickFalse)
            {
              /* Not a scene list; treat a geometry (e.g. [100x100]) as an
                 extraction region. */
              if (IsGeometry(subimage) != MagickFalse)
                (void) CloneString(&image_info->extract,subimage);
            }
          else
            {
              size_t
                first,
                last;
              /* Parse a comma-separated list of scene numbers/ranges and
                 record the overall [scene, scene+number_scenes) window. */
              (void) CloneString(&image_info->scenes,subimage);
              image_info->scene=StringToUnsignedLong(image_info->scenes);
              image_info->number_scenes=image_info->scene;
              p=image_info->scenes;
              /* NOTE(review): the loop condition tests *q while the
                 increment advances p; p is re-synced to q at the bottom of
                 each iteration via strtol's end pointer — confirm this is
                 the intended idiom. */
              for (q=(char *) image_info->scenes; *q != '\0'; p++)
              {
                while ((isspace((int) ((unsigned char) *p)) != 0) ||
                       (*p == ','))
                  p++;
                first=(size_t) strtol(p,&q,10);
                last=first;
                while (isspace((int) ((unsigned char) *q)) != 0)
                  q++;
                if (*q == '-')
                  last=(size_t) strtol(q+1,&q,10);
                if (first > last)
                  Swap(first,last);
                if (first < image_info->scene)
                  image_info->scene=first;
                if (last > image_info->number_scenes)
                  image_info->number_scenes=last;
                p=q;
              }
              /* Convert the inclusive [scene, number_scenes] bounds into a
                 count of scenes. */
              image_info->number_scenes-=image_info->scene-1;
              image_info->subimage=image_info->scene;
              image_info->subrange=image_info->number_scenes;
            }
        }
    }
  /*
    Derive the format from the filename extension, looking through any
    compression suffix (.gz/.Z/.svgz/.wmz/.bz2) to the real extension.
  */
  *extension='\0';
  GetPathComponent(image_info->filename,ExtensionPath,extension);
#if defined(MAGICKCORE_ZLIB_DELEGATE)
  if (*extension != '\0')
    if ((LocaleCompare(extension,"gz") == 0) ||
        (LocaleCompare(extension,"Z") == 0) ||
        (LocaleCompare(extension,"svgz") == 0) ||
        (LocaleCompare(extension,"wmz") == 0))
      {
        char
          path[MaxTextExtent];
        /* Strip the compression suffix and take the next extension. */
        (void) CopyMagickString(path,image_info->filename,MaxTextExtent);
        path[strlen(path)-strlen(extension)-1]='\0';
        GetPathComponent(path,ExtensionPath,extension);
      }
#endif
#if defined(MAGICKCORE_BZLIB_DELEGATE)
  if (*extension != '\0')
    if (LocaleCompare(extension,"bz2") == 0)
      {
        char
          path[MaxTextExtent];
        (void) CopyMagickString(path,image_info->filename,MaxTextExtent);
        path[strlen(path)-strlen(extension)-1]='\0';
        GetPathComponent(path,ExtensionPath,extension);
      }
#endif
  image_info->affirm=MagickFalse;
  sans_exception=AcquireExceptionInfo();
  if (*extension != '\0')
    {
      MagickFormatType
        format_type;
      register ssize_t
        i;
      static const char
        *format_type_formats[] =
        {
          "AUTOTRACE",
          "BROWSE",
          "DCRAW",
          "EDIT",
          "EPHEMERAL",
          "LAUNCH",
          "MPEG:DECODE",
          "MPEG:ENCODE",
          "PRINT",
          "PS:ALPHA",
          "PS:CMYK",
          "PS:COLOR",
          "PS:GRAY",
          "PS:MONO",
          "SCAN",
          "SHOW",
          "WIN",
          (char *) NULL
        };
      /*
        User specified image format.
      */
      (void) CopyMagickString(magic,extension,MaxTextExtent);
      LocaleUpper(magic);
      /*
        Look for explicit image formats.
      */
      format_type=UndefinedFormatType;
      i=0;
      while ((format_type == UndefinedFormatType) &&
             (format_type_formats[i] != (char *) NULL))
      {
        /* First-character check is a cheap pre-filter before the full
           case-insensitive compare. */
        if ((*magic == *format_type_formats[i]) &&
            (LocaleCompare(magic,format_type_formats[i]) == 0))
          format_type=ExplicitFormatType;
        i++;
      }
      magick_info=GetMagickInfo(magic,sans_exception);
      if ((magick_info != (const MagickInfo *) NULL) &&
          (magick_info->format_type != UndefinedFormatType))
        format_type=magick_info->format_type;
      if (format_type == UndefinedFormatType)
        (void) CopyMagickString(image_info->magick,magic,MaxTextExtent);
      else
        if (format_type == ExplicitFormatType)
          {
            /* An explicit format suppresses magic-byte sniffing below. */
            image_info->affirm=MagickTrue;
            (void) CopyMagickString(image_info->magick,magic,MaxTextExtent);
          }
      if (LocaleCompare(magic,"RGB") == 0)
        image_info->affirm=MagickFalse;  /* maybe SGI disguised as RGB */
    }
  /*
    Look for explicit 'format:image' in filename.
  */
  *magic='\0';
  GetPathComponent(image_info->filename,MagickPath,magic);
  if (*magic == '\0')
    (void) CopyMagickString(magic,image_info->magick,MaxTextExtent);
  else
    {
      /*
        User specified image format.
      */
      LocaleUpper(magic);
      if (IsMagickConflict(magic) == MagickFalse)
        {
          (void) CopyMagickString(image_info->magick,magic,MaxTextExtent);
          if (LocaleCompare(magic,"EPHEMERAL") != 0)
            image_info->affirm=MagickTrue;
          else
            image_info->temporary=MagickTrue;
        }
    }
  magick_info=GetMagickInfo(magic,sans_exception);
  sans_exception=DestroyExceptionInfo(sans_exception);
  /* Formats without endian support force the endian setting back to
     undefined. */
  if ((magick_info == (const MagickInfo *) NULL) ||
      (GetMagickEndianSupport(magick_info) == MagickFalse))
    image_info->endian=UndefinedEndian;
  GetPathComponent(image_info->filename,CanonicalPath,filename);
  (void) CopyMagickString(image_info->filename,filename,MaxTextExtent);
  if ((image_info->adjoin != MagickFalse) && (frames > 1))
    {
      /*
        Test for multiple image support (e.g. image%02d.png).
      */
      (void) InterpretImageFilename(image_info,(Image *) NULL,
        image_info->filename,(int) image_info->scene,filename);
      if ((LocaleCompare(filename,image_info->filename) != 0) &&
          (strchr(filename,'%') == (char *) NULL))
        image_info->adjoin=MagickFalse;
    }
  if ((image_info->adjoin != MagickFalse) && (frames > 0))
    {
      /*
        Some image formats do not support multiple frames per file.
      */
      magick_info=GetMagickInfo(magic,exception);
      if (magick_info != (const MagickInfo *) NULL)
        if (GetMagickAdjoin(magick_info) == MagickFalse)
          image_info->adjoin=MagickFalse;
    }
  if (image_info->affirm != MagickFalse)
    return(MagickTrue);
  if (frames == 0)
    {
      /*
        Determine the image format from the first few bytes of the file.
      */
      image=AcquireImage(image_info);
      (void) CopyMagickString(image->filename,image_info->filename,
        MaxTextExtent);
      status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
      if (status == MagickFalse)
        {
          image=DestroyImage(image);
          return(MagickFalse);
        }
      if ((IsBlobSeekable(image) == MagickFalse) ||
          (IsBlobExempt(image) != MagickFalse))
        {
          /*
            Copy standard input or pipe to temporary file.
          */
          *filename='\0';
          status=ImageToFile(image,filename,exception);
          (void) CloseBlob(image);
          if (status == MagickFalse)
            {
              image=DestroyImage(image);
              return(MagickFalse);
            }
          SetImageInfoFile(image_info,(FILE *) NULL);
          (void) CopyMagickString(image->filename,filename,MaxTextExtent);
          status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
          if (status == MagickFalse)
            {
              image=DestroyImage(image);
              return(MagickFalse);
            }
          /* The caller now reads from the temporary file; mark it so it is
             removed when the ImageInfo is destroyed. */
          (void) CopyMagickString(image_info->filename,filename,MaxTextExtent);
          image_info->temporary=MagickTrue;
        }
      /* Sniff the leading bytes, then rewind so the codec sees the whole
         stream. */
      (void) ResetMagickMemory(magick,0,sizeof(magick));
      count=ReadBlob(image,2*MaxTextExtent,magick);
      (void) SeekBlob(image,-((MagickOffsetType) count),SEEK_CUR);
      (void) CloseBlob(image);
      image=DestroyImage(image);
      /*
        Check magic.xml configuration file.
      */
      sans_exception=AcquireExceptionInfo();
      magic_info=GetMagicInfo(magick,(size_t) count,sans_exception);
      if ((magic_info != (const MagicInfo *) NULL) &&
          (GetMagicName(magic_info) != (char *) NULL))
        {
          (void) CopyMagickString(image_info->magick,GetMagicName(magic_info),
            MaxTextExtent);
          magick_info=GetMagickInfo(image_info->magick,sans_exception);
          if ((magick_info == (const MagickInfo *) NULL) ||
              (GetMagickEndianSupport(magick_info) == MagickFalse))
            image_info->endian=UndefinedEndian;
          sans_exception=DestroyExceptionInfo(sans_exception);
          return(MagickTrue);
        }
      magick_info=GetMagickInfo(image_info->magick,sans_exception);
      if ((magick_info == (const MagickInfo *) NULL) ||
          (GetMagickEndianSupport(magick_info) == MagickFalse))
        image_info->endian=UndefinedEndian;
      sans_exception=DestroyExceptionInfo(sans_exception);
    }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e I n f o B l o b %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfoBlob() sets the image info blob member.
%
% The format of the SetImageInfoBlob method is:
%
% void SetImageInfoBlob(ImageInfo *image_info,const void *blob,
% const size_t length)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o blob: the blob.
%
% o length: the blob length.
%
*/
MagickExport void SetImageInfoBlob(ImageInfo *image_info,const void *blob,
  const size_t length)
{
  /*
    Attach an in-memory blob (and its length) to the image info; the
    caller retains ownership of the buffer.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  image_info->length=length;
  image_info->blob=(void *) blob;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e I n f o F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfoFile() sets the image info file member.
%
% The format of the SetImageInfoFile method is:
%
% void SetImageInfoFile(ImageInfo *image_info,FILE *file)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o file: the file.
%
*/
MagickExport void SetImageInfoFile(ImageInfo *image_info,FILE *file)
{
  /*
    Direct subsequent blob I/O to an already-open stdio stream; a NULL
    file clears the association.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  image_info->file=file;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageMask() associates a mask with the image. The mask must be the same
% dimensions as the image.
%
% The format of the SetImageMask method is:
%
% MagickBooleanType SetImageMask(Image *image,const Image *mask)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o mask: the image mask.
%
*/
MagickExport MagickBooleanType SetImageMask(Image *image,const Image *mask)
{
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  /* The mask must match the image geometry exactly. */
  if ((mask != (const Image *) NULL) &&
      ((mask->columns != image->columns) || (mask->rows != image->rows)))
    ThrowBinaryException(ImageError,"ImageSizeDiffers",image->filename);
  /* Release any previously attached mask before installing the new one. */
  if (image->mask != (Image *) NULL)
    image->mask=DestroyImage(image->mask);
  image->mask=NewImageList();
  if (mask == (Image *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /* Keep a private deep copy so the caller's mask may be freed. */
  image->mask=CloneImage(mask,0,0,MagickTrue,&image->exception);
  return(image->mask != (Image *) NULL ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e O p a c i t y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageOpacity() sets the opacity levels of the image.
%
% The format of the SetImageOpacity method is:
%
% MagickBooleanType SetImageOpacity(Image *image,const Quantum opacity)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o opacity: the level of transparency: 0 is fully opaque and QuantumRange is
% fully transparent.
%
*/
MagickExport MagickBooleanType SetImageOpacity(Image *image,
  const Quantum opacity)
{
  CacheView
    *image_view;
  ExceptionInfo
    *exception;
  MagickBooleanType
    status;
  ssize_t
    y;
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  /* Enabling matte ensures the opacity channel is honored downstream. */
  image->matte=MagickTrue;
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *restrict q;
    register ssize_t
      x;
    /* OpenMP cannot break out of a parallel loop; skip remaining rows
       once any row has failed. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* Overwrite the opacity of every pixel in the row with the constant. */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelOpacity(q,opacity);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e V i r t u a l P i x e l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageVirtualPixelMethod() sets the "virtual pixels" method for the
% image and returns the previous setting. A virtual pixel is any pixel access
% that is outside the boundaries of the image cache.
%
% The format of the SetImageVirtualPixelMethod() method is:
%
% VirtualPixelMethod SetImageVirtualPixelMethod(const Image *image,
% const VirtualPixelMethod virtual_pixel_method)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: choose the type of virtual pixel.
%
*/
MagickExport VirtualPixelMethod SetImageVirtualPixelMethod(const Image *image,
  const VirtualPixelMethod virtual_pixel_method)
{
  /*
    Delegate to the pixel cache, which records the new method and hands
    back the previous one.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image->filename);
  return(SetPixelCacheVirtualMethod(image,virtual_pixel_method));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S m u s h I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SmushImages() takes all images from the current image pointer to the end
% of the image list and smushes them to each other top-to-bottom if the
% stack parameter is true, otherwise left-to-right.
%
% The current gravity setting now affects how the image is justified in the
% final image.
%
% The format of the SmushImages method is:
%
% Image *SmushImages(const Image *images,const MagickBooleanType stack,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o stack: A value other than 0 stacks the images top-to-bottom.
%
% o offset: minimum distance in pixels between images.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SmushXGap() measures how far the current image can be slid left toward its
  predecessor: for each row it finds the transparent margin on the right edge
  of the left image plus the transparent margin on the left edge of the right
  image, and returns the minimum such overlap across all rows, less the
  requested offset.
*/
static ssize_t SmushXGap(const Image *smush_image,const Image *images,
  const ssize_t offset,ExceptionInfo *exception)
{
  CacheView
    *left_view,
    *right_view;
  const Image
    *left_image,
    *right_image;
  RectangleInfo
    left_geometry,
    right_geometry;
  register const PixelPacket
    *p;
  register ssize_t
    i,
    y;
  size_t
    gap;
  ssize_t
    x;
  /* The first image in the list has no neighbor to smush against. */
  if (images->previous == (Image *) NULL)
    return(0);
  right_image=images;
  SetGeometry(smush_image,&right_geometry);
  GravityAdjustGeometry(right_image->columns,right_image->rows,
    right_image->gravity,&right_geometry);
  left_image=images->previous;
  SetGeometry(smush_image,&left_geometry);
  GravityAdjustGeometry(left_image->columns,left_image->rows,
    left_image->gravity,&left_geometry);
  /* Start with the maximum possible overlap and shrink it per row. */
  gap=right_image->columns;
  left_view=AcquireVirtualCacheView(left_image,exception);
  right_view=AcquireVirtualCacheView(right_image,exception);
  for (y=0; y < (ssize_t) smush_image->rows; y++)
  {
    /* Count transparent pixels inward from the left image's right edge. */
    for (x=(ssize_t) left_image->columns-1; x > 0; x--)
    {
      p=GetCacheViewVirtualPixels(left_view,x,left_geometry.y+y,1,1,exception);
      if ((p == (const PixelPacket *) NULL) ||
          (GetPixelOpacity(p) != TransparentOpacity) ||
          ((left_image->columns-x-1) >= gap))
        break;
    }
    i=(ssize_t) left_image->columns-x-1;
    /* Count transparent pixels inward from the right image's left edge. */
    for (x=0; x < (ssize_t) right_image->columns; x++)
    {
      p=GetCacheViewVirtualPixels(right_view,x,right_geometry.y+y,1,1,
        exception);
      if ((p == (const PixelPacket *) NULL) ||
          (GetPixelOpacity(p) != TransparentOpacity) ||
          ((x+i) >= (ssize_t) gap))
        break;
    }
    if ((x+i) < (ssize_t) gap)
      gap=(size_t) (x+i);
  }
  right_view=DestroyCacheView(right_view);
  left_view=DestroyCacheView(left_view);
  /* NOTE(review): the outer loop has no break, so y always equals rows
     here and this early return appears unreachable — confirm intent. */
  if (y < (ssize_t) smush_image->rows)
    return(offset);
  return((ssize_t) gap-offset);
}
/*
  SmushYGap() is the vertical analogue of SmushXGap(): per column it measures
  the transparent margin at the bottom of the previous (top) image plus the
  transparent margin at the top of the current (bottom) image, and returns
  the minimum overlap across all columns, less the requested offset.
*/
static ssize_t SmushYGap(const Image *smush_image,const Image *images,
  const ssize_t offset,ExceptionInfo *exception)
{
  CacheView
    *bottom_view,
    *top_view;
  const Image
    *bottom_image,
    *top_image;
  RectangleInfo
    bottom_geometry,
    top_geometry;
  register const PixelPacket
    *p;
  register ssize_t
    i,
    x;
  size_t
    gap;
  ssize_t
    y;
  /* The first image in the list has no neighbor to smush against. */
  if (images->previous == (Image *) NULL)
    return(0);
  bottom_image=images;
  SetGeometry(smush_image,&bottom_geometry);
  GravityAdjustGeometry(bottom_image->columns,bottom_image->rows,
    bottom_image->gravity,&bottom_geometry);
  top_image=images->previous;
  SetGeometry(smush_image,&top_geometry);
  GravityAdjustGeometry(top_image->columns,top_image->rows,top_image->gravity,
    &top_geometry);
  /* Start with the maximum possible overlap and shrink it per column. */
  gap=bottom_image->rows;
  top_view=AcquireVirtualCacheView(top_image,exception);
  bottom_view=AcquireVirtualCacheView(bottom_image,exception);
  for (x=0; x < (ssize_t) smush_image->columns; x++)
  {
    /* Count transparent pixels upward from the top image's bottom edge. */
    for (y=(ssize_t) top_image->rows-1; y > 0; y--)
    {
      p=GetCacheViewVirtualPixels(top_view,top_geometry.x+x,y,1,1,exception);
      if ((p == (const PixelPacket *) NULL) ||
          (GetPixelOpacity(p) != TransparentOpacity) ||
          ((top_image->rows-y-1) >= gap))
        break;
    }
    i=(ssize_t) top_image->rows-y-1;
    /* Count transparent pixels downward from the bottom image's top edge. */
    for (y=0; y < (ssize_t) bottom_image->rows; y++)
    {
      p=GetCacheViewVirtualPixels(bottom_view,bottom_geometry.x+x,y,1,1,
        exception);
      if ((p == (const PixelPacket *) NULL) ||
          (GetPixelOpacity(p) != TransparentOpacity) ||
          ((y+i) >= (ssize_t) gap))
        break;
    }
    if ((y+i) < (ssize_t) gap)
      gap=(size_t) (y+i);
  }
  bottom_view=DestroyCacheView(bottom_view);
  top_view=DestroyCacheView(top_view);
  /* NOTE(review): the outer loop has no break, so x always equals columns
     here and this early return appears unreachable — confirm intent. */
  if (x < (ssize_t) smush_image->columns)
    return(offset);
  return((ssize_t) gap-offset);
}
MagickExport Image *SmushImages(const Image *images,
  const MagickBooleanType stack,const ssize_t offset,ExceptionInfo *exception)
{
#define SmushImageTag  "Smush/Image"
  CacheView
    *smush_view;
  const Image
    *image;
  Image
    *smush_image;
  MagickBooleanType
    matte,
    proceed,
    status;
  MagickOffsetType
    n;
  RectangleInfo
    geometry;
  register const Image
    *next;
  size_t
    height,
    number_images,
    width;
  ssize_t
    x_offset,
    y_offset;
  /*
    Compute maximum area of smushed area.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  image=images;
  matte=image->matte;
  number_images=1;
  width=image->columns;
  height=image->rows;
  next=GetNextImageInList(image);
  /* First pass: size the canvas large enough for every image plus the
     per-join offset; smushing can only shrink it later. */
  for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    if (next->matte != MagickFalse)
      matte=MagickTrue;
    number_images++;
    if (stack != MagickFalse)
      {
        /* Vertical stack: widest image wins, heights accumulate. */
        if (next->columns > width)
          width=next->columns;
        height+=next->rows;
        if (next->previous != (Image *) NULL)
          height+=offset;
        continue;
      }
    /* Horizontal row: widths accumulate, tallest image wins. */
    width+=next->columns;
    if (next->previous != (Image *) NULL)
      width+=offset;
    if (next->rows > height)
      height=next->rows;
  }
  /*
    Smush images.
  */
  smush_image=CloneImage(image,width,height,MagickTrue,exception);
  if (smush_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(smush_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&smush_image->exception);
      smush_image=DestroyImage(smush_image);
      return((Image *) NULL);
    }
  smush_image->matte=matte;
  (void) SetImageBackgroundColor(smush_image);
  status=MagickTrue;
  x_offset=0;
  y_offset=0;
  smush_view=AcquireVirtualCacheView(smush_image,exception);
  /* Second pass: composite each image, pulled back by the measured
     transparent gap between it and its predecessor. */
  for (n=0; n < (MagickOffsetType) number_images; n++)
  {
    SetGeometry(smush_image,&geometry);
    GravityAdjustGeometry(image->columns,image->rows,image->gravity,&geometry);
    if (stack != MagickFalse)
      {
        x_offset-=geometry.x;
        y_offset-=SmushYGap(smush_image,image,offset,exception);
      }
    else
      {
        x_offset-=SmushXGap(smush_image,image,offset,exception);
        y_offset-=geometry.y;
      }
    /* NOTE(review): status reflects only the most recent composite. */
    status=CompositeImage(smush_image,OverCompositeOp,image,x_offset,y_offset);
    proceed=SetImageProgress(image,SmushImageTag,n,number_images);
    if (proceed == MagickFalse)
      break;
    if (stack == MagickFalse)
      {
        x_offset+=(ssize_t) image->columns;
        y_offset=0;
      }
    else
      {
        x_offset=0;
        y_offset+=(ssize_t) image->rows;
      }
    image=GetNextImageInList(image);
  }
  /* Trim the canvas to the actual extent consumed by the smushed images. */
  if (stack == MagickFalse)
    smush_image->columns=(size_t) x_offset;
  else
    smush_image->rows=(size_t) y_offset;
  smush_view=DestroyCacheView(smush_view);
  if (status == MagickFalse)
    smush_image=DestroyImage(smush_image);
  return(smush_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t r i p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StripImage() strips an image of all profiles and comments.
%
% The format of the StripImage method is:
%
% MagickBooleanType StripImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType StripImage(Image *image)
{
  /* Properties removed when stripping an image of its metadata. */
  static const char
    *properties[] = { "comment", "date:create", "date:modify" };
  register size_t
    i;
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  /* Drop every embedded profile (ICC, EXIF, IPTC, ...). */
  DestroyImageProfiles(image);
  for (i=0; i < (sizeof(properties)/sizeof(properties[0])); i++)
    (void) DeleteImageProperty(image,properties[i]);
  /* Instruct the PNG encoder to omit ancillary chunks as well. */
  (void) SetImageArtifact(image,"png:include-chunk","none,trns,gama");
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImage() initializes the red, green, and blue intensities of each pixel
% as defined by the colormap index.
%
% The format of the SyncImage method is:
%
% MagickBooleanType SyncImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  PushColormapIndex() validates a colormap index: out-of-range indices are
  clamped to 0 and flagged through range_exception so the caller can raise
  a single error after the sweep.
*/
static inline IndexPacket PushColormapIndex(Image *image,
  const size_t index,MagickBooleanType *range_exception)
{
  if (index >= image->colors)
    {
      *range_exception=MagickTrue;
      return((IndexPacket) 0);
    }
  return((IndexPacket) index);
}
MagickExport MagickBooleanType SyncImage(Image *image)
{
  CacheView
    *image_view;
  ExceptionInfo
    *exception;
  MagickBooleanType
    range_exception,
    status;
  ssize_t
    y;
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  /* DirectClass images carry no colormap, so there is nothing to sync. */
  if (image->storage_class == DirectClass)
    return(MagickFalse);
  range_exception=MagickFalse;
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(range_exception,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    IndexPacket
      index;
    register IndexPacket
      *restrict indexes;
    register PixelPacket
      *restrict q;
    register ssize_t
      x;
    /* OpenMP cannot break out of a parallel loop; skip remaining rows
       once any row has failed. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    /* Refresh each pixel's RGB (and opacity, for matte images) from its
       colormap entry; bad indexes are clamped and flagged. */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      index=PushColormapIndex(image,(size_t) GetPixelIndex(indexes+x),
        &range_exception);
      if (image->matte == MagickFalse)
        SetPixelRgb(q,image->colormap+(ssize_t) index);  /* fix: missing ';' */
      else
        SetPixelRGBO(q,image->colormap+(ssize_t) index);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  /* Report invalid colormap indexes once, unless we are only pinging. */
  if ((image->ping == MagickFalse) && (range_exception != MagickFalse))
    (void) ThrowMagickException(&image->exception,GetMagickModule(),
      CorruptImageError,"InvalidColormapIndex","`%s'",image->filename);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c I m a g e S e t t i n g s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImageSettings() syncs image_info options into per-image attributes.
%
% The format of the SyncImageSettings method is:
%
% MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
% Image *image)
% MagickBooleanType SyncImagesSettings(const ImageInfo *image_info,
% Image *image)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o image: the image.
%
*/
MagickExport MagickBooleanType SyncImagesSettings(ImageInfo *image_info,
  Image *images)
{
  register Image
    *next;
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  /* Propagate the image_info options onto every image in the list. */
  for (next=images; next != (Image *) NULL; next=GetNextImageInList(next))
    (void) SyncImageSettings(image_info,next);
  /* The page option applies once per read; clear it after syncing. */
  (void) DeleteImageOption(image_info,"page");
  return(MagickTrue);
}
MagickExport MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
  Image *image)
{
  char
    property[MaxTextExtent];
  const char
    *option,
    *value;
  GeometryInfo
    geometry_info;
  MagickStatusType
    flags;
  ResolutionType
    units;
  /*
    Sync image options.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Each recognized option, when present, overwrites the corresponding
     per-image attribute; absent options leave the attribute untouched. */
  option=GetImageOption(image_info,"background");
  if (option != (const char *) NULL)
    (void) QueryColorDatabase(option,&image->background_color,
      &image->exception);
  option=GetImageOption(image_info,"bias");
  if (option != (const char *) NULL)
    image->bias=StringToDoubleInterval(option,(double) QuantumRange+1.0);
  option=GetImageOption(image_info,"black-point-compensation");
  if (option != (const char *) NULL)
    image->black_point_compensation=(MagickBooleanType) ParseCommandOption(
      MagickBooleanOptions,MagickFalse,option);
  option=GetImageOption(image_info,"blue-primary");
  if (option != (const char *) NULL)
    {
      /* Chromaticity pairs: a missing sigma duplicates rho. */
      flags=ParseGeometry(option,&geometry_info);
      image->chromaticity.blue_primary.x=geometry_info.rho;
      image->chromaticity.blue_primary.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->chromaticity.blue_primary.y=image->chromaticity.blue_primary.x;
    }
  option=GetImageOption(image_info,"bordercolor");
  if (option != (const char *) NULL)
    (void) QueryColorDatabase(option,&image->border_color,&image->exception);
  option=GetImageOption(image_info,"colors");
  if (option != (const char *) NULL)
    image->colors=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"compose");
  if (option != (const char *) NULL)
    image->compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"compress");
  if (option != (const char *) NULL)
    image->compression=(CompressionType) ParseCommandOption(
      MagickCompressOptions,MagickFalse,option);
  option=GetImageOption(image_info,"debug");
  if (option != (const char *) NULL)
    image->debug=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"density");
  if (option != (const char *) NULL)
    {
      /* NOTE(review): this local shadows the outer geometry_info — the
         parse below writes the local while harmless, confirm intended. */
      GeometryInfo
        geometry_info;
      /*
        Set image density.
      */
      flags=ParseGeometry(option,&geometry_info);
      image->x_resolution=geometry_info.rho;
      image->y_resolution=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->y_resolution=image->x_resolution;
    }
  option=GetImageOption(image_info,"depth");
  if (option != (const char *) NULL)
    image->depth=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"endian");
  if (option != (const char *) NULL)
    image->endian=(EndianType) ParseCommandOption(MagickEndianOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"filter");
  if (option != (const char *) NULL)
    image->filter=(FilterTypes) ParseCommandOption(MagickFilterOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"fuzz");
  if (option != (const char *) NULL)
    image->fuzz=StringToDoubleInterval(option,(double) QuantumRange+1.0);
  option=GetImageOption(image_info,"gravity");
  if (option != (const char *) NULL)
    image->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"green-primary");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      image->chromaticity.green_primary.x=geometry_info.rho;
      image->chromaticity.green_primary.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->chromaticity.green_primary.y=image->chromaticity.green_primary.x;
    }
  option=GetImageOption(image_info,"intensity");
  if (option != (const char *) NULL)
    image->intensity=(PixelIntensityMethod) ParseCommandOption(
      MagickPixelIntensityOptions,MagickFalse,option);
  option=GetImageOption(image_info,"intent");
  if (option != (const char *) NULL)
    image->rendering_intent=(RenderingIntent) ParseCommandOption(
      MagickIntentOptions,MagickFalse,option);
  option=GetImageOption(image_info,"interlace");
  if (option != (const char *) NULL)
    image->interlace=(InterlaceType) ParseCommandOption(MagickInterlaceOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"interpolate");
  if (option != (const char *) NULL)
    image->interpolate=(InterpolatePixelMethod) ParseCommandOption(
      MagickInterpolateOptions,MagickFalse,option);
  option=GetImageOption(image_info,"loop");
  if (option != (const char *) NULL)
    image->iterations=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"mattecolor");
  if (option != (const char *) NULL)
    (void) QueryColorDatabase(option,&image->matte_color,&image->exception);
  option=GetImageOption(image_info,"orient");
  if (option != (const char *) NULL)
    image->orientation=(OrientationType) ParseCommandOption(
      MagickOrientationOptions,MagickFalse,option);
  option=GetImageOption(image_info,"quality");
  if (option != (const char *) NULL)
    image->quality=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"red-primary");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      image->chromaticity.red_primary.x=geometry_info.rho;
      image->chromaticity.red_primary.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->chromaticity.red_primary.y=image->chromaticity.red_primary.x;
    }
  /* A quality set directly on the ImageInfo overrides the option. */
  if (image_info->quality != UndefinedCompressionQuality)
    image->quality=image_info->quality;
  option=GetImageOption(image_info,"scene");
  if (option != (const char *) NULL)
    image->scene=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"taint");
  if (option != (const char *) NULL)
    image->taint=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"tile-offset");
  if (option != (const char *) NULL)
    {
      char
        *geometry;
      geometry=GetPageGeometry(option);
      /* NOTE(review): flags is assigned here but not inspected. */
      flags=ParseAbsoluteGeometry(geometry,&image->tile_offset);
      geometry=DestroyString(geometry);
    }
  option=GetImageOption(image_info,"transparent-color");
  if (option != (const char *) NULL)
    (void) QueryColorDatabase(option,&image->transparent_color,
      &image->exception);
  option=GetImageOption(image_info,"type");
  if (option != (const char *) NULL)
    image->type=(ImageType) ParseCommandOption(MagickTypeOptions,MagickFalse,
      option);
  option=GetImageOption(image_info,"units");
  if (option != (const char *) NULL)
    units=(ResolutionType) ParseCommandOption(MagickResolutionOptions,
      MagickFalse,option);
  else
    units = image_info->units;
  if (units != UndefinedResolution)
    {
      /* When the unit system changes, rescale the stored resolution so the
         physical density is preserved (1 inch = 2.54 cm). */
      if (image->units != units)
        switch (image->units)
        {
          case PixelsPerInchResolution:
          {
            if (units == PixelsPerCentimeterResolution)
              {
                image->x_resolution/=2.54;
                image->y_resolution/=2.54;
              }
            break;
          }
          case PixelsPerCentimeterResolution:
          {
            if (units == PixelsPerInchResolution)
              {
                /* Round to two decimal places. */
                image->x_resolution=(double) ((size_t) (100.0*2.54*
                  image->x_resolution+0.5))/100.0;
                image->y_resolution=(double) ((size_t) (100.0*2.54*
                  image->y_resolution+0.5))/100.0;
              }
            break;
          }
          default:
            break;
        }
      image->units=units;
    }
  option=GetImageOption(image_info,"white-point");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      image->chromaticity.white_point.x=geometry_info.rho;
      image->chromaticity.white_point.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->chromaticity.white_point.y=image->chromaticity.white_point.x;
    }
  /* Finally, mirror every option onto the image as an artifact so coders
     and filters can consult them later. */
  ResetImageOptionIterator(image_info);
  for (option=GetNextImageOption(image_info); option != (const char *) NULL; )
  {
    value=GetImageOption(image_info,option);
    if (value != (const char *) NULL)
      {
        (void) FormatLocaleString(property,MaxTextExtent,"%s",option);
        (void) SetImageArtifact(image,property,value);
      }
    option=GetNextImageOption(image_info);
  }
  return(MagickTrue);
}
|
concurrent_unordered_map.cuh.h | /*
* Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* This source code refers to https://github.com/rapidsai/cudf
* and is licensed under the license found in the LICENSE file
* in the root directory of this source tree.
*/
#ifndef CONCURRENT_UNORDERED_MAP_CUH
#define CONCURRENT_UNORDERED_MAP_CUH
#include <thrust/pair.h>
#include <cassert>
#include <iostream>
#include <iterator>
#include <type_traits>
#include "hash_functions.cuh"
#include "managed.cuh"
#include "managed_allocator.cuh"
// TODO: replace this with CUDA_TRY and propagate the error
#ifndef CUDA_RT_CALL
// Checks the result of a CUDA runtime call; on failure, prints the failing
// expression, source location, and CUDA error string to stderr, then
// terminates the process with exit(1).
#define CUDA_RT_CALL(call)                                                    \
  {                                                                           \
    cudaError_t cudaStatus = call;                                            \
    if (cudaSuccess != cudaStatus) {                                          \
      fprintf(stderr,                                                         \
              "ERROR: CUDA RT call \"%s\" in line %d of file %s failed with " \
              "%s (%d).\n",                                                   \
              #call, __LINE__, __FILE__, cudaGetErrorString(cudaStatus),      \
              cudaStatus);                                                    \
      exit(1);                                                                \
    }                                                                         \
  }
#endif
// Emulates an 8-bit CAS using the native 32-bit atomicCAS on the enclosing
// 4-byte aligned word.  A CAS loop is required: the word-level compare must
// use the *current* values of the three neighboring bytes (the original
// assumed them to be zero, so it failed or corrupted memory whenever they
// were not), and the returned old value must be the addressed byte, not the
// low byte of the whole word.
__inline__ __device__ int8_t atomicCAS(int8_t* address, int8_t compare,
                                       int8_t val) {
  // Bit offset of the addressed byte within its aligned 32-bit word.
  unsigned int shift = ((size_t)address & 3) * 8;
  unsigned int* base_address =
      (unsigned int*)((char*)address - ((size_t)address & 3));
  unsigned int old_word = *base_address;
  unsigned int assumed;
  do {
    assumed = old_word;
    // Fail fast once the target byte no longer equals `compare`.
    if ((int8_t)((assumed >> shift) & 0xffu) != compare) break;
    // Splice `val` into the word, preserving the three neighboring bytes.
    unsigned int new_word = (assumed & ~(0xffu << shift)) |
                            (((unsigned int)(uint8_t)val) << shift);
    old_word = atomicCAS(base_address, assumed, new_word);
  } while (assumed != old_word);
  // Return the prior value of the addressed byte, per CAS semantics.
  return (int8_t)((old_word >> shift) & 0xffu);
}
// Emulates a 16-bit CAS using the native 32-bit atomicCAS on the enclosing
// aligned word (address is assumed 2-byte aligned, so `& 2` selects the
// halfword).  Same fix as the 8-bit overload: the original zeroed the
// neighboring halfword in its compare/swap words and returned the low
// halfword of the 32-bit result instead of the addressed halfword.
__inline__ __device__ int16_t atomicCAS(int16_t* address, int16_t compare,
                                        int16_t val) {
  // Bit offset (0 or 16) of the addressed halfword within its 32-bit word.
  unsigned int shift = ((size_t)address & 2) * 8;
  unsigned int* base_address =
      (unsigned int*)((char*)address - ((size_t)address & 2));
  unsigned int old_word = *base_address;
  unsigned int assumed;
  do {
    assumed = old_word;
    // Fail fast once the target halfword no longer equals `compare`.
    if ((int16_t)((assumed >> shift) & 0xffffu) != compare) break;
    // Splice `val` into the word, preserving the neighboring halfword.
    unsigned int new_word = (assumed & ~(0xffffu << shift)) |
                            (((unsigned int)(uint16_t)val) << shift);
    old_word = atomicCAS(base_address, assumed, new_word);
  } while (assumed != old_word);
  // Return the prior value of the addressed halfword.
  return (int16_t)((old_word >> shift) & 0xffffu);
}
// The CUDA runtime only provides 64-bit atomicCAS/atomicAdd for
// `unsigned long long`.  The overloads below forward other 64-bit types by
// reinterpreting the operands (same size and representation) and casting the
// result back.
__inline__ __device__ int64_t atomicCAS(int64_t* address, int64_t compare,
                                        int64_t val) {
  return (int64_t)atomicCAS((unsigned long long*)address,
                            (unsigned long long)compare,
                            (unsigned long long)val);
}

__inline__ __device__ uint64_t atomicCAS(uint64_t* address, uint64_t compare,
                                         uint64_t val) {
  return (uint64_t)atomicCAS((unsigned long long*)address,
                             (unsigned long long)compare,
                             (unsigned long long)val);
}

__inline__ __device__ long long int atomicCAS(long long int* address,
                                              long long int compare,
                                              long long int val) {
  return (long long int)atomicCAS((unsigned long long*)address,
                                  (unsigned long long)compare,
                                  (unsigned long long)val);
}

// Floating-point CAS: compare/swap the bit pattern, not the numeric value
// (note: distinct bit patterns such as -0.0/+0.0 or NaN payloads therefore
// compare unequal here, unlike operator==).
__inline__ __device__ double atomicCAS(double* address, double compare,
                                       double val) {
  return __longlong_as_double(atomicCAS((unsigned long long int*)address,
                                        __double_as_longlong(compare),
                                        __double_as_longlong(val)));
}

__inline__ __device__ float atomicCAS(float* address, float compare,
                                      float val) {
  return __int_as_float(
      atomicCAS((int*)address, __float_as_int(compare), __float_as_int(val)));
}

// 64-bit add wrappers; two's-complement wraparound makes the unsigned
// primitive correct for the signed type as well.
__inline__ __device__ int64_t atomicAdd(int64_t* address, int64_t val) {
  return (int64_t)atomicAdd((unsigned long long*)address,
                            (unsigned long long)val);
}

__inline__ __device__ uint64_t atomicAdd(uint64_t* address, uint64_t val) {
  return (uint64_t)atomicAdd((unsigned long long*)address,
                             (unsigned long long)val);
}
// Loads a <key, value> pair through the widest native vector type whose size
// exactly matches pair_type (16, 8, 4, or 2 bytes), falling back to a plain
// load otherwise.  The sizeof comparisons are compile-time constants, so the
// compiler keeps only one branch per instantiation.
// NOTE(review): the union-based punning presumes `ptr` is aligned to the
// chosen vector width — confirm callers guarantee this for uint4/uint2.
template <typename pair_type>
__forceinline__ __device__ pair_type
load_pair_vectorized(const pair_type* __restrict__ const ptr) {
  if (sizeof(uint4) == sizeof(pair_type)) {
    // 16-byte pair: one 128-bit load
    union pair_type2vec_type {
      uint4 vec_val;
      pair_type pair_val;
    };
    pair_type2vec_type converter = {0, 0, 0, 0};
    converter.vec_val = *reinterpret_cast<const uint4*>(ptr);
    return converter.pair_val;
  } else if (sizeof(uint2) == sizeof(pair_type)) {
    // 8-byte pair: one 64-bit load
    union pair_type2vec_type {
      uint2 vec_val;
      pair_type pair_val;
    };
    pair_type2vec_type converter = {0, 0};
    converter.vec_val = *reinterpret_cast<const uint2*>(ptr);
    return converter.pair_val;
  } else if (sizeof(int) == sizeof(pair_type)) {
    // 4-byte pair: one 32-bit load
    union pair_type2vec_type {
      int vec_val;
      pair_type pair_val;
    };
    pair_type2vec_type converter = {0};
    converter.vec_val = *reinterpret_cast<const int*>(ptr);
    return converter.pair_val;
  } else if (sizeof(short) == sizeof(pair_type)) {
    // 2-byte pair: one 16-bit load
    union pair_type2vec_type {
      short vec_val;
      pair_type pair_val;
    };
    pair_type2vec_type converter = {0};
    converter.vec_val = *reinterpret_cast<const short*>(ptr);
    return converter.pair_val;
  } else {
    // No matching vector width: ordinary member-wise load
    return *ptr;
  }
}
// Mirror of load_pair_vectorized: stores a <key, value> pair through the
// widest native vector type whose size exactly matches pair_type, falling
// back to a plain store.  Same alignment caveat as the load applies.
template <typename pair_type>
__forceinline__ __device__ void store_pair_vectorized(
    pair_type* __restrict__ const ptr, const pair_type val) {
  if (sizeof(uint4) == sizeof(pair_type)) {
    // 16-byte pair: one 128-bit store
    union pair_type2vec_type {
      uint4 vec_val;
      pair_type pair_val;
    };
    pair_type2vec_type converter = {0, 0, 0, 0};
    converter.pair_val = val;
    *reinterpret_cast<uint4*>(ptr) = converter.vec_val;
  } else if (sizeof(uint2) == sizeof(pair_type)) {
    // 8-byte pair: one 64-bit store
    union pair_type2vec_type {
      uint2 vec_val;
      pair_type pair_val;
    };
    pair_type2vec_type converter = {0, 0};
    converter.pair_val = val;
    *reinterpret_cast<uint2*>(ptr) = converter.vec_val;
  } else if (sizeof(int) == sizeof(pair_type)) {
    // 4-byte pair: one 32-bit store
    union pair_type2vec_type {
      int vec_val;
      pair_type pair_val;
    };
    pair_type2vec_type converter = {0};
    converter.pair_val = val;
    *reinterpret_cast<int*>(ptr) = converter.vec_val;
  } else if (sizeof(short) == sizeof(pair_type)) {
    // 2-byte pair: one 16-bit store
    union pair_type2vec_type {
      short vec_val;
      pair_type pair_val;
    };
    pair_type2vec_type converter = {0};
    converter.pair_val = val;
    *reinterpret_cast<short*>(ptr) = converter.vec_val;
  } else {
    // No matching vector width: ordinary member-wise store
    *ptr = val;
  }
}
// Kernel: one thread per table slot.  Fills every entry with the sentinel
// pair <key_val, elem_val> so that all slots start out "unused"; uses the
// vectorized store when the pair size matches a native vector type.
template <typename value_type, typename size_type, typename key_type,
          typename elem_type>
__global__ void init_hashtbl( // Init every entry of the table with
                              // <unused_key, unused_value> pair
    value_type* __restrict__ const hashtbl_values, const size_type n,
    const key_type key_val, const elem_type elem_val) {
  const size_type idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx < n) {
    store_pair_vectorized(
        hashtbl_values + idx,
        thrust::make_pair(
            key_val, elem_val)); // Simply store every element a <K, V> pair
  }
}
// Default key-equality functor: plain operator== on the key type.
// Usable from both host and device code.
template <typename T>
struct equal_to {
  using result_type = bool;
  using first_argument_type = T;
  using second_argument_type = T;
  __forceinline__ __host__ __device__ constexpr bool operator()(
      const first_argument_type& lhs, const second_argument_type& rhs) const {
    return lhs == rhs;
  }
};
// Iterator adapter that walks a [begin, end) range circularly: incrementing
// past the last element wraps back to begin.  Used to probe the open-addressed
// hash table starting at an arbitrary bucket.
template <typename Iterator>
class cycle_iterator_adapter {
 public:
  using value_type = typename std::iterator_traits<Iterator>::value_type;
  using difference_type =
      typename std::iterator_traits<Iterator>::difference_type;
  using pointer = typename std::iterator_traits<Iterator>::pointer;
  using reference = typename std::iterator_traits<Iterator>::reference;
  using iterator_type = Iterator;

  cycle_iterator_adapter() = delete;

  __host__ __device__ explicit cycle_iterator_adapter(
      const iterator_type& begin, const iterator_type& end,
      const iterator_type& current)
      // mem-initializers in declaration order (members are initialized in
      // declaration order regardless of how the list is written)
      : m_current(current), m_begin(begin), m_end(end) {}

  // Pre-increment: advance one position, wrapping from end back to begin.
  __host__ __device__ cycle_iterator_adapter& operator++() {
    if (m_end == (m_current + 1))
      m_current = m_begin;
    else
      ++m_current;
    return *this;
  }

  // Const overload kept for interface compatibility; relies on m_current
  // being mutable (the original mutated a member inside a const function,
  // which is ill-formed the moment this overload is instantiated).
  __host__ __device__ const cycle_iterator_adapter& operator++() const {
    if (m_end == (m_current + 1))
      m_current = m_begin;
    else
      ++m_current;
    return *this;
  }

  // Post-increment returns the previous position BY VALUE.  The original
  // returned a reference to the local `old`, which dangles on return.
  __host__ __device__ cycle_iterator_adapter operator++(int) {
    cycle_iterator_adapter<iterator_type> old(m_begin, m_end, m_current);
    if (m_end == (m_current + 1))
      m_current = m_begin;
    else
      ++m_current;
    return old;
  }

  __host__ __device__ cycle_iterator_adapter operator++(int) const {
    cycle_iterator_adapter<iterator_type> old(m_begin, m_end, m_current);
    if (m_end == (m_current + 1))
      m_current = m_begin;
    else
      ++m_current;
    return old;
  }

  // Equality requires the same position AND the same underlying range.
  __host__ __device__ bool equal(
      const cycle_iterator_adapter<iterator_type>& other) const {
    return m_current == other.m_current && m_begin == other.m_begin &&
           m_end == other.m_end;
  }

  __host__ __device__ reference& operator*() { return *m_current; }
  __host__ __device__ const reference& operator*() const { return *m_current; }

  // For the raw-pointer instantiation used by the map (value_type*), the
  // iterator itself is the pointer.  The original const overload called
  // m_current.operator->(), which is ill-formed on a raw pointer.
  __host__ __device__ const pointer operator->() const { return m_current; }
  __host__ __device__ pointer operator->() { return m_current; }

  __host__ __device__ iterator_type getter() const { return m_current; }

 private:
  // mutable so the const increment overloads above are well-formed
  mutable iterator_type m_current;
  iterator_type m_begin;
  iterator_type m_end;
};
// Two cycling iterators compare equal when they reference the same position
// of the same [begin, end) range (see cycle_iterator_adapter::equal).
template <class T>
__host__ __device__ bool operator==(const cycle_iterator_adapter<T>& lhs,
                                    const cycle_iterator_adapter<T>& rhs) {
  return lhs.equal(rhs);
}

template <class T>
__host__ __device__ bool operator!=(const cycle_iterator_adapter<T>& lhs,
                                    const cycle_iterator_adapter<T>& rhs) {
  return !lhs.equal(rhs);
}
/**
 * Does support concurrent insert, but not concurrent insert and probing.
 *
 * TODO:
 * - add constructor that takes pointer to hash_table to avoid allocations
 * - extend interface to accept streams
 */
template <typename Key, typename Element, Key unused_key,
typename Hasher = default_hash<Key>,
typename Equality = equal_to<Key>,
typename Allocator = managed_allocator<thrust::pair<Key, Element>>,
bool count_collisions = false>
class concurrent_unordered_map : public managed {
public:
using size_type = size_t;
using hasher = Hasher;
using key_equal = Equality;
using allocator_type = Allocator;
using key_type = Key;
using value_type = thrust::pair<Key, Element>;
using mapped_type = Element;
using iterator = cycle_iterator_adapter<value_type*>;
using const_iterator = const cycle_iterator_adapter<value_type*>;
private:
union pair2longlong {
unsigned long long int longlong;
value_type pair;
};
public:
concurrent_unordered_map(const concurrent_unordered_map&) = delete;
concurrent_unordered_map& operator=(const concurrent_unordered_map&) = delete;
explicit concurrent_unordered_map(size_type n,
const mapped_type unused_element,
const Hasher& hf = hasher(),
const Equality& eql = key_equal(),
const allocator_type& a = allocator_type())
: m_hf(hf),
m_equal(eql),
m_allocator(a),
m_hashtbl_size(n),
m_hashtbl_capacity(n),
m_collisions(0),
m_unused_element(
unused_element) { // allocate the raw data of hash table:
// m_hashtbl_values,pre-alloc it on current GPU if UM.
m_hashtbl_values = m_allocator.allocate(m_hashtbl_capacity);
constexpr int block_size = 128;
{
cudaPointerAttributes hashtbl_values_ptr_attributes;
cudaError_t status = cudaPointerGetAttributes(
&hashtbl_values_ptr_attributes, m_hashtbl_values);
#if CUDART_VERSION >= 10000
if (cudaSuccess == status &&
hashtbl_values_ptr_attributes.type == cudaMemoryTypeManaged)
#else
if (cudaSuccess == status && hashtbl_values_ptr_attributes.isManaged)
#endif
{
int dev_id = 0;
CUDA_RT_CALL(cudaGetDevice(&dev_id));
CUDA_RT_CALL(cudaMemPrefetchAsync(
m_hashtbl_values, m_hashtbl_size * sizeof(value_type), dev_id, 0));
}
}
// Initialize kernel, set all entry to unused <K,V>
init_hashtbl<<<((m_hashtbl_size - 1) / block_size) + 1, block_size>>>(
m_hashtbl_values, m_hashtbl_size, unused_key, m_unused_element);
// CUDA_RT_CALL( cudaGetLastError() );
CUDA_RT_CALL(cudaStreamSynchronize(0));
CUDA_RT_CALL(cudaGetLastError());
}
~concurrent_unordered_map() {
m_allocator.deallocate(m_hashtbl_values, m_hashtbl_capacity);
}
__host__ __device__ iterator begin() {
return iterator(m_hashtbl_values, m_hashtbl_values + m_hashtbl_size,
m_hashtbl_values);
}
__host__ __device__ const_iterator begin() const {
return const_iterator(m_hashtbl_values, m_hashtbl_values + m_hashtbl_size,
m_hashtbl_values);
}
__host__ __device__ iterator end() {
return iterator(m_hashtbl_values, m_hashtbl_values + m_hashtbl_size,
m_hashtbl_values + m_hashtbl_size);
}
__host__ __device__ const_iterator end() const {
return const_iterator(m_hashtbl_values, m_hashtbl_values + m_hashtbl_size,
m_hashtbl_values + m_hashtbl_size);
}
__host__ __device__ size_type size() const { return m_hashtbl_size; }
__host__ __device__ value_type* data() const { return m_hashtbl_values; }
__forceinline__ static constexpr __host__ __device__ key_type
get_unused_key() {
return unused_key;
}
// Generic update of a hash table value for any aggregator
template <typename aggregation_type>
__forceinline__ __device__ void update_existing_value(
mapped_type& existing_value, value_type const& insert_pair,
aggregation_type) {
// update without CAS
existing_value = insert_pair.second;
}
__forceinline__ __device__ void accum_existing_value_atomic(
mapped_type& existing_value, value_type const& accum_pair) {
// update with CAS
// existing_value = insert_pair.second;
int num_element =
sizeof(existing_value.data) / sizeof(*(existing_value.data));
const mapped_type& accumulator = accum_pair.second;
for (int i = 0; i < num_element; i++) {
atomicAdd(existing_value.data + i, accumulator.data[i]);
}
// atomicAdd(&existing_value, double val)
}
// TODO Overload atomicAdd for 1 byte and 2 byte types, until then, overload
// specifically for the
// types where atomicAdd already has an overload. Otherwise the generic
// update_existing_value will
// be used. Specialization for COUNT aggregator
/*
__forceinline__ __host__ __device__
void update_existing_value(mapped_type & existing_value, value_type const &
insert_pair,
count_op<int32_t> op)
{
atomicAdd(&existing_value, static_cast<mapped_type>(1));
}
// Specialization for COUNT aggregator
__forceinline__ __host__ __device__
void update_existing_value(mapped_type & existing_value, value_type const &
insert_pair,
count_op<int64_t> op)
{
atomicAdd(&existing_value, static_cast<mapped_type>(1));
}
// Specialization for COUNT aggregator
__forceinline__ __host__ __device__
void update_existing_value(mapped_type & existing_value, value_type const &
insert_pair,
count_op<float> op)
{
atomicAdd(&existing_value, static_cast<mapped_type>(1));
}
// Specialization for COUNT aggregator
__forceinline__ __host__ __device__
void update_existing_value(mapped_type & existing_value, value_type const &
insert_pair,
count_op<double> op)
{
atomicAdd(&existing_value, static_cast<mapped_type>(1));
}
*/
/* --------------------------------------------------------------------------*/
/**
* @Synopsis Inserts a new (key, value) pair. If the key already exists in
the map
an aggregation operation is performed with the new value and
existing value.
E.g., if the aggregation operation is 'max', then the maximum is
computed
between the new value and existing value and the result is
stored in the map.
*
* @Param[in] x The new (key, value) pair to insert
* @Param[in] op The aggregation operation to perform
* @Param[in] keys_equal An optional functor for comparing two keys
* @Param[in] precomputed_hash Indicates if a precomputed hash value is being
passed in to use
* to determine the write location of the new key
* @Param[in] precomputed_hash_value The precomputed hash value
* @tparam aggregation_type A functor for a binary operation that performs the
aggregation
* @tparam comparison_type A functor for comparing two keys
*
* @Returns An iterator to the newly inserted key,value pair
*/
/* ----------------------------------------------------------------------------*/
template <typename aggregation_type, class comparison_type = key_equal,
typename hash_value_type = typename Hasher::result_type>
__forceinline__ __device__ iterator insert(
const value_type& x, aggregation_type op,
comparison_type keys_equal = key_equal(), bool precomputed_hash = false,
hash_value_type precomputed_hash_value = 0) {
const size_type hashtbl_size = m_hashtbl_size;
value_type* hashtbl_values = m_hashtbl_values;
hash_value_type hash_value{0};
// If a precomputed hash value has been passed in, then use it to determine
// the write location of the new key
if (true == precomputed_hash) {
hash_value = precomputed_hash_value;
}
// Otherwise, compute the hash value from the new key
else {
hash_value = m_hf(x.first);
}
size_type current_index = hash_value % hashtbl_size;
value_type* current_hash_bucket = &(hashtbl_values[current_index]);
const key_type insert_key = x.first;
bool insert_success = false;
size_type counter = 0;
while (false == insert_success) {
if (counter++ >= hashtbl_size) {
return end();
}
key_type& existing_key = current_hash_bucket->first;
mapped_type& existing_value = current_hash_bucket->second;
// Try and set the existing_key for the current hash bucket to insert_key
const key_type old_key = atomicCAS(&existing_key, unused_key, insert_key);
// If old_key == unused_key, the current hash bucket was empty
// and existing_key was updated to insert_key by the atomicCAS.
// If old_key == insert_key, this key has already been inserted.
// In either case, perform the atomic aggregation of existing_value and
// insert_value
// Because the hash table is initialized with the identity value of the
// aggregation
// operation, it is safe to perform the operation when the existing_value
// still
// has its initial value
// TODO: Use template specialization to make use of native atomic
// functions
// TODO: How to handle data types less than 32 bits?
if (keys_equal(unused_key, old_key) || keys_equal(insert_key, old_key)) {
update_existing_value(existing_value, x, op);
insert_success = true;
break;
}
current_index = (current_index + 1) % hashtbl_size;
current_hash_bucket = &(hashtbl_values[current_index]);
}
return iterator(m_hashtbl_values, m_hashtbl_values + hashtbl_size,
current_hash_bucket);
}
/* This function is not currently implemented
__forceinline__
__host__ __device__ iterator insert(const value_type& x)
{
const size_type hashtbl_size = m_hashtbl_size;
value_type* hashtbl_values = m_hashtbl_values;
const size_type key_hash = m_hf( x.first );
size_type hash_tbl_idx = key_hash%hashtbl_size;
value_type* it = 0;
while (0 == it) {
value_type* tmp_it = hashtbl_values + hash_tbl_idx;
#ifdef __CUDA_ARCH__
if ( std::numeric_limits<key_type>::is_integer &&
std::numeric_limits<mapped_type>::is_integer && sizeof(unsigned long long int)
== sizeof(value_type)
)
{
pair2longlong converter = {0ull};
converter.pair = thrust::make_pair( unused_key, m_unused_element
);
const unsigned long long int unused = converter.longlong;
converter.pair = x;
const unsigned long long int value = converter.longlong;
const unsigned long long int old_val = atomicCAS(
reinterpret_cast<unsigned long long
int*>(tmp_it), unused, value ); if ( old_val == unused ) { it = tmp_it;
}
else if ( count_collisions )
{
atomicAdd( &m_collisions, 1 );
}
} else {
const key_type old_key = atomicCAS( &(tmp_it->first), unused_key,
x.first );
if ( m_equal( unused_key, old_key ) ) {
(m_hashtbl_values+hash_tbl_idx)->second = x.second;
it = tmp_it;
}
else if ( count_collisions )
{
atomicAdd( &m_collisions, 1 );
}
}
#else
#pragma omp critical
{
if ( m_equal( unused_key, tmp_it->first ) ) {
hashtbl_values[hash_tbl_idx] = thrust::make_pair( x.first,
x.second );
it = tmp_it;
}
}
#endif
hash_tbl_idx = (hash_tbl_idx+1)%hashtbl_size;
}
return iterator( m_hashtbl_values,m_hashtbl_values+hashtbl_size,it);
}
*/
__forceinline__ __host__ __device__ const_iterator
find(const key_type& k) const {
size_type key_hash = m_hf(k);
size_type hash_tbl_idx = key_hash % m_hashtbl_size;
value_type* begin_ptr = 0;
size_type counter = 0;
while (0 == begin_ptr) {
value_type* tmp_ptr = m_hashtbl_values + hash_tbl_idx;
const key_type tmp_val = tmp_ptr->first;
if (m_equal(k, tmp_val)) {
begin_ptr = tmp_ptr;
break;
}
if (m_equal(unused_key, tmp_val) || counter > m_hashtbl_size) {
begin_ptr = m_hashtbl_values + m_hashtbl_size;
break;
}
hash_tbl_idx = (hash_tbl_idx + 1) % m_hashtbl_size;
++counter;
}
return const_iterator(m_hashtbl_values, m_hashtbl_values + m_hashtbl_size,
begin_ptr);
}
template <typename aggregation_type, typename counter_type,
class comparison_type = key_equal,
typename hash_value_type = typename Hasher::result_type>
__forceinline__ __device__ iterator get_insert(
const key_type& k, aggregation_type op, counter_type* value_counter,
comparison_type keys_equal = key_equal(), bool precomputed_hash = false,
hash_value_type precomputed_hash_value = 0) {
const size_type hashtbl_size = m_hashtbl_size;
value_type* hashtbl_values = m_hashtbl_values;
hash_value_type hash_value{0};
// If a precomputed hash value has been passed in, then use it to determine
// the write location of the new key
if (true == precomputed_hash) {
hash_value = precomputed_hash_value;
}
// Otherwise, compute the hash value from the new key
else {
hash_value = m_hf(k);
}
size_type current_index = hash_value % hashtbl_size;
value_type* current_hash_bucket = &(hashtbl_values[current_index]);
const key_type insert_key = k;
bool insert_success = false;
size_type counter = 0;
while (false == insert_success) {
// Situation %5: No slot: All slot in the hashtable is occupied by other
// key, both get and
// insert fail. Return empty iterator
if (counter++ >= hashtbl_size) {
return end();
}
key_type& existing_key = current_hash_bucket->first;
volatile mapped_type& existing_value = current_hash_bucket->second;
// Try and set the existing_key for the current hash bucket to insert_key
const key_type old_key = atomicCAS(&existing_key, unused_key, insert_key);
// If old_key == unused_key, the current hash bucket was empty
// and existing_key was updated to insert_key by the atomicCAS.
// If old_key == insert_key, this key has already been inserted.
// In either case, perform the atomic aggregation of existing_value and
// insert_value
// Because the hash table is initialized with the identity value of the
// aggregation
// operation, it is safe to perform the operation when the existing_value
// still
// has its initial value
// TODO: Use template specialization to make use of native atomic
// functions
// TODO: How to handle data types less than 32 bits?
// Situation #1: Empty slot: this key never exist in the table, ready to
// insert.
if (keys_equal(unused_key, old_key)) {
// update_existing_value(existing_value, x, op);
existing_value = (mapped_type)(atomicAdd(value_counter, 1));
break;
} // Situation #2+#3: Target slot: This slot is the slot for this key
else if (keys_equal(insert_key, old_key)) {
while (existing_value == m_unused_element) {
// Situation #2: This slot is inserting by another CUDA thread and the
// value is not yet
// ready, just wait
}
// Situation #3: This slot is already ready, get successfully and return
// (iterator of) the
// value
break;
}
// Situation 4: Wrong slot: This slot is occupied by other key, get fail,
// do nothing and
// linear probing to next slot.
current_index = (current_index + 1) % hashtbl_size;
current_hash_bucket = &(hashtbl_values[current_index]);
}
return iterator(m_hashtbl_values, m_hashtbl_values + hashtbl_size,
current_hash_bucket);
}
int assign_async(const concurrent_unordered_map& other,
cudaStream_t stream = 0) {
m_collisions = other.m_collisions;
if (other.m_hashtbl_size <= m_hashtbl_capacity) {
m_hashtbl_size = other.m_hashtbl_size;
} else {
m_allocator.deallocate(m_hashtbl_values, m_hashtbl_capacity);
m_hashtbl_capacity = other.m_hashtbl_size;
m_hashtbl_size = other.m_hashtbl_size;
m_hashtbl_values = m_allocator.allocate(m_hashtbl_capacity);
}
CUDA_RT_CALL(cudaMemcpyAsync(m_hashtbl_values, other.m_hashtbl_values,
m_hashtbl_size * sizeof(value_type),
cudaMemcpyDefault, stream));
return 0;
}
void clear_async(cudaStream_t stream = 0) {
constexpr int block_size = 128;
init_hashtbl<<<((m_hashtbl_size - 1) / block_size) + 1, block_size, 0,
stream>>>(m_hashtbl_values, m_hashtbl_size, unused_key,
m_unused_element);
if (count_collisions) m_collisions = 0;
}
unsigned long long get_num_collisions() const { return m_collisions; }
void print() {
for (size_type i = 0; i < 5; ++i) {
std::cout << i << ": " << m_hashtbl_values[i].first << ","
<< m_hashtbl_values[i].second << std::endl;
}
}
int prefetch(const int dev_id, cudaStream_t stream = 0) {
cudaPointerAttributes hashtbl_values_ptr_attributes;
cudaError_t status = cudaPointerGetAttributes(
&hashtbl_values_ptr_attributes, m_hashtbl_values);
#if CUDART_VERSION >= 10000
if (cudaSuccess == status &&
hashtbl_values_ptr_attributes.type == cudaMemoryTypeManaged)
#else
if (cudaSuccess == status && hashtbl_values_ptr_attributes.isManaged)
#endif
{
CUDA_RT_CALL(cudaMemPrefetchAsync(m_hashtbl_values,
m_hashtbl_size * sizeof(value_type),
dev_id, stream));
}
CUDA_RT_CALL(cudaMemPrefetchAsync(this, sizeof(*this), dev_id, stream));
return 0;
}
template <class comparison_type = key_equal,
typename hash_value_type = typename Hasher::result_type>
__forceinline__ __device__ const_iterator
accum(const value_type& x, comparison_type keys_equal = key_equal(),
bool precomputed_hash = false,
hash_value_type precomputed_hash_value = 0) {
const key_type& dst_key = x.first;
auto it = find(dst_key);
if (it == end()) {
return it;
}
value_type* dst = it.getter();
accum_existing_value_atomic(dst->second, x);
return it;
}
private:
const hasher m_hf;
const key_equal m_equal;
const mapped_type m_unused_element;
allocator_type m_allocator;
size_type m_hashtbl_size;
size_type m_hashtbl_capacity;
value_type* m_hashtbl_values;
unsigned long long m_collisions;
};
#endif // CONCURRENT_UNORDERED_MAP_CUH
|
GB_unop__exp2_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__exp2_fc32_fc32)
// op(A') function: GB (_unop_tran__exp2_fc32_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = GB_cexp2f (aij)
// A (input) and C (output) matrix entry types
#define GB_ATYPE \
    GxB_FC32_t

#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    GxB_FC32_t aij = Ax [pA]

// access the entry of C at position p
#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_cexp2f (x) ;

// casting (A and C share the same type here, so this is an identity copy)
#define GB_CAST(z, aij) \
    GxB_FC32_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GxB_FC32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = aij ;               \
    Cx [pC] = GB_cexp2f (z) ;        \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_EXP2 || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = exp2 (Ax [p]) for every entry, single-precision complex; the
// non-bitmap branch assumes A is dense/full (all anz entries present).
GrB_Info GB (_unop_apply__exp2_fc32_fc32)
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/full case: every one of the anz entries is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = GB_cexp2f (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = GB_cexp2f (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = exp2 (A'): transpose, typecast, and apply the unary operator.  The
// entire transpose algorithm is supplied by the shared GB_unop_transpose.c
// template, which expands using the GB_* macros defined above.
GrB_Info GB (_unop_tran__exp2_fc32_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,   // per-workspace scratch arrays
    const int64_t *restrict A_slice, // how A is partitioned across tasks
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
ReLU.h | // --------------------------------------------------------------------------
// Binary Brain -- binary neural net framework
//
// Copyright (C) 2018-2019 by Ryuji Fuchikami
// https://github.com/ryuz
// ryuji.fuchikami@nifty.com
// --------------------------------------------------------------------------
#pragma once
#include "bb/Manager.h"
#include "bb/Binarize.h"
namespace bb {
// ReLU(活性化層)
template <typename BinType = float, typename RealType = float>
class ReLU : public Binarize<BinType, RealType>
{
using _super = Binarize<BinType, RealType>;
public:
    // Model name used by the serialization/identification framework.
    static inline std::string ModelName(void) { return "ReLU"; }
    // Object name also encodes the bin/real data types, e.g. "ReLU_fp32_fp32".
    static inline std::string ObjectName(void){ return ModelName() + "_" + DataType<BinType>::Name() + "_" + DataType<RealType>::Name(); }

    std::string GetModelName(void) const override { return ModelName(); }
    std::string GetObjectName(void) const override { return ObjectName(); }
protected:
    // When true, the layer behaves as its Binarize base class instead of a
    // real-valued ReLU; forced on when BinType is the 1-bit type.
    bool m_binary_mode;

    using _super::m_host_only;

protected:
    ReLU() {
        m_binary_mode = (DataType<BinType>::type == BB_TYPE_BIT);
    }
/**
* @brief コマンド処理
* @detail コマンド処理
* @param args コマンド
*/
/**
 * Command processing: interprets two-token runtime commands.
 *   "binary <bool>"    — toggles binary mode (ignored when BinType is Bit,
 *                        since binary mode is then mandatory)
 *   "host_only <bool>" — toggles host-only (no GPU) execution
 * Any other command is silently ignored.
 */
void CommandProc(std::vector<std::string> args) override
{
    if (args.size() != 2) {
        return;     // all recognized commands have exactly two tokens
    }

    // binary mode setting
    if (args[0] == "binary" && DataType<BinType>::type != BB_TYPE_BIT) {
        m_binary_mode = EvalBool(args[1]);
    }

    // host-only mode setting
    if (args[0] == "host_only") {
        m_host_only = EvalBool(args[1]);
    }
}
// Appends " binary : <flag>" to the info dump when binary mode is active,
// then defers to the base class for the remaining fields.
void PrintInfoText(std::ostream& os, std::string indent, int columns, int nest, int depth) const override
{
    if ( m_binary_mode ) {
        os << indent << " binary : " << m_binary_mode << std::endl;
    }
    _super::PrintInfoText(os, indent, columns, nest, depth);
}
public:
    // Factory method: instances are handed out via shared_ptr because the
    // constructor is protected.
    static std::shared_ptr<ReLU> Create(void)
    {
        auto self = std::shared_ptr<ReLU>(new ReLU);
        return self;
    }

    ~ReLU() {}
// 1ノードのみForward計算
/**
 * Forward computation for a single node.
 * In binary mode the Binarize base class handles it; otherwise applies
 * ReLU (negative inputs clamped to zero) element-wise.
 */
std::vector<double> ForwardNode(index_t node, std::vector<double> x_vec) const override
{
    if ( m_binary_mode ) {
        return _super::ForwardNode(node, x_vec);
    }

    std::vector<double> y_vec;
    y_vec.reserve(x_vec.size());
    for ( std::size_t i = 0; i < x_vec.size(); ++i ) {
        double v = x_vec[i];
        y_vec.push_back(v > 0.0 ? v : 0.0);     // ReLU
    }
    return y_vec;
}
/**
* @brief forward演算
* @detail forward演算を行う
* @param x 入力データ
* @param train 学習時にtrueを指定
* @return forward演算結果
*/
/**
 * Forward pass.
 * Dispatches, in order of preference, to: the Binarize base class (binary
 * mode), a CUDA kernel (fp32, device available), an AVX path (fp32), or a
 * generic scalar fallback.
 * @param x_buf  input frame buffer
 * @param train  true during training (keeps the input for Backward)
 * @return the activation output
 */
inline FrameBuffer Forward(FrameBuffer x_buf, bool train = true) override
{
    // Binary mode: the Binarize base class implements the forward pass
    if ( DataType<BinType>::type == BB_TYPE_BIT || m_binary_mode) {
        return _super::Forward(x_buf, train);
    }

    BB_ASSERT(x_buf.GetType() == DataType<RealType>::type);

    // Allocate the output buffer (same frame count and shape as the input)
    FrameBuffer y_buf(x_buf.GetFrameSize(), x_buf.GetShape(), DataType<BinType>::type);

    // Save the input for the backward pass
    if ( train ) {
        this->PushFrameBuffer(x_buf);
//      this->PushFrameBuffer(y_buf);
    }

#ifdef BB_WITH_CUDA
    if ( !m_host_only && DataType<BinType>::type == BB_TYPE_FP32 && DataType<RealType>::type == BB_TYPE_FP32
            && x_buf.IsDeviceAvailable() && y_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable() ) {
        // CUDA implementation
        auto ptr_x = x_buf.LockDeviceMemoryConst();
        auto ptr_y = y_buf.LockDeviceMemory(true);
        bbcu_fp32_ReLU_Forward(
            (float const *)ptr_x.GetAddr(),
            (float       *)ptr_y.GetAddr(),
            (int          )x_buf.GetNodeSize(),
            (int          )x_buf.GetFrameSize(),
            (int          )(x_buf.GetFrameStride() / sizeof(float))
        );
        return y_buf;
    }
#endif

    if ( DataType<BinType>::type == BB_TYPE_FP32 && DataType<RealType>::type == BB_TYPE_FP32 ) {
        // AVX implementation
        index_t frame_size = x_buf.GetFrameSize();
        index_t node_size  = x_buf.GetNodeSize();
        auto x_ptr = x_buf.template LockConst<float>();
        auto y_ptr = y_buf.template Lock<float>(true);
        // NOTE(review): rounding the frame count up to a multiple of 8
        // presumably relies on the frame stride being padded to 32 bytes —
        // confirm against FrameBuffer's allocation guarantees.
        index_t m256_frame_size = (int)(((frame_size + 7) / 8) * 8);
        __m256 zero = _mm256_set1_ps(0);
        for (index_t node = 0; node < node_size; ++node) {
            auto x_addr = (float const *)x_ptr.GetAddr(node);
            auto y_addr = (float       *)y_ptr.GetAddr(node);
            for (index_t frame = 0; frame < m256_frame_size; frame += 8) {
                __m256 in_sig = _mm256_load_ps(&x_addr[frame]);
                in_sig = _mm256_max_ps(in_sig, zero);   // ReLU: max(x, 0)
                _mm256_store_ps(&y_addr[frame], in_sig);
            }
        }
        return y_buf;
    }

    {
        // Generic (scalar) implementation
        index_t frame_size = x_buf.GetFrameSize();
        index_t node_size  = x_buf.GetNodeSize();
        auto x_ptr = x_buf.template LockConst<RealType>();
        auto y_ptr = y_buf.template Lock<BinType>();

        // ReLU
        #pragma omp parallel for
        for (index_t node = 0; node < node_size; ++node) {
            for (index_t frame = 0; frame < frame_size; ++frame) {
                auto x = x_ptr.Get(frame, node);
                y_ptr.Set(frame, node, x > (RealType)0.0 ? (BinType)x : (BinType)0.0);
            }
        }
        return y_buf;
    }
}
/**
* @brief backward演算
* @detail backward演算を行う
*
* @return backward演算結果
*/
/**
 * @brief  backward computation (ReLU gradient)
 * @detail Computes dx = dy where the forward input x was > 0, else 0.
 *         Dispatches to a CUDA kernel, an AVX loop, or a generic fallback.
 * @param  dy_buf upstream gradient with respect to the forward output
 * @return gradient with respect to the forward input x
 */
inline FrameBuffer Backward(FrameBuffer dy_buf) override
{
    if (dy_buf.Empty()) {
        return FrameBuffer();
    }

    // Binary mode: the base class implements the binarized backward pass.
    if ( DataType<BinType>::type == BB_TYPE_BIT || m_binary_mode) {
        return _super::Backward(dy_buf);
    }

    BB_ASSERT(dy_buf.GetType() == DataType<RealType>::type);

    // Result buffer mirrors dy's frame size, shape, and type.
    FrameBuffer dx_buf(dy_buf.GetFrameSize(), dy_buf.GetShape(), dy_buf.GetType());

    // FrameBuffer y_buf = this->PopFrameBuffer();
    // Recover the forward-pass input saved on the frame-buffer stack.
    FrameBuffer x_buf = this->PopFrameBuffer();

#ifdef BB_WITH_CUDA
    if ( DataType<RealType>::type == BB_TYPE_FP32 && !m_host_only
        && x_buf.IsDeviceAvailable() && dx_buf.IsDeviceAvailable() && dy_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable() ) {
        // GPU path (fp32 only)
        auto ptr_x  = x_buf.LockDeviceMemoryConst();
        auto ptr_dy = dy_buf.LockDeviceMemoryConst();
        auto ptr_dx = dx_buf.LockDeviceMemory(true);
        bbcu_fp32_ReLU_Backward(
            (float const *)ptr_x.GetAddr(),
            (float const *)ptr_dy.GetAddr(),
            (float       *)ptr_dx.GetAddr(),
            (int          )dy_buf.GetNodeSize(),
            (int          )dy_buf.GetFrameSize(),
            (int          )(dy_buf.GetFrameStride() / sizeof(float))
        );
        return dx_buf;
    }
#endif

    if ( DataType<RealType>::type == BB_TYPE_FP32 ) {
        // AVX path: dx = dy masked by (x > 0), 8 frames per iteration.
        index_t frame_size = dx_buf.GetFrameSize();
        index_t node_size  = dx_buf.GetNodeSize();
        auto x_ptr  = x_buf.template LockConst<float>();
        // auto y_ptr  = y_buf.template LockConst<float>();
        auto dy_ptr = dy_buf.template LockConst<float>();
        auto dx_ptr = dx_buf.template Lock<float>(true);
        // Round frame count up to a multiple of 8; assumes the frame stride
        // provides at least this much padding (same assumption as Forward).
        index_t m256_frame_size = (int)(((frame_size + 7) / 8) * 8);
        __m256 zero = _mm256_set1_ps(0);
        for (index_t node = 0; node < node_size; ++node) {
            auto x_addr  = (float *)x_ptr.GetAddr(node);
            auto dy_addr = (float *)dy_ptr.GetAddr(node);
            auto dx_addr = (float *)dx_ptr.GetAddr(node);
            for (index_t frame = 0; frame < m256_frame_size; frame += 8) {
                __m256 x    = _mm256_load_ps(&x_addr[frame]);
                __m256 dy   = _mm256_load_ps(&dy_addr[frame]);
                // Lanes where x > 0 become all-ones; AND keeps dy there, zeroes elsewhere.
                __m256 mask = _mm256_cmp_ps(x, zero, _CMP_GT_OS);
                __m256 dx   = _mm256_and_ps(dy, mask);
                _mm256_store_ps(&dx_addr[frame], dx);
            }
        }
        return dx_buf;
    }

    {
        // Generic fallback for other type combinations.
        index_t frame_size = dx_buf.GetFrameSize();
        index_t node_size  = dx_buf.GetNodeSize();
        // NOTE(review): x_buf is locked as BinType here although the fp32
        // paths read it as float (RealType) — confirm BinType is the intended
        // view of the saved forward input for non-fp32 instantiations.
        auto x_ptr  = x_buf.template LockConst<BinType>();
        auto dy_ptr = dy_buf.template LockConst<RealType>();
        auto dx_ptr = dx_buf.template Lock<RealType>();
        // ReLU gradient: pass dy through where x > 0.
        #pragma omp parallel for
        for (index_t node = 0; node < node_size; ++node) {
            for (index_t frame = 0; frame < frame_size; ++frame) {
                auto x  = x_ptr.Get(frame, node);
                auto dy = dy_ptr.Get(frame, node);
                dx_ptr.Set(frame, node, (x > (BinType)0) ? dy : (RealType)0);
            }
        }
        return dx_buf;
    }
}
// シリアライズ
protected:
// Serialize this layer's state: format version tag first, then the
// parent-class payload, then this class's own members, in that order.
void DumpObjectData(std::ostream &os) const override
{
    // Version tag lets LoadObjectData reject incompatible streams.
    std::int64_t format_version = 1;
    bb::SaveValue(os, format_version);
    // Parent-class state precedes our own members.
    _super::DumpObjectData(os);
    bb::SaveValue(os, m_binary_mode);
}
// Deserialize state previously written by DumpObjectData, reading fields
// in the identical order: version tag, parent-class payload, members.
void LoadObjectData(std::istream &is) override
{
    // Only format version 1 is understood.
    std::int64_t format_version;
    bb::LoadValue(is, format_version);
    BB_ASSERT(format_version == 1);
    _super::LoadObjectData(is);
    bb::LoadValue(is, m_binary_mode);
}
};
}
// end of file |
GB_unaryop__minv_fp32_uint32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_fp32_uint32
// op(A') function: GB_tran__minv_fp32_uint32
// C type: float
// A type: uint32_t
// cast: float cij = (float) aij
// unaryop: cij = (1.0F)/aij
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = (1.0F)/x ;
// casting
#define GB_CASTING(z, aij) \
float z = (float) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_FP32 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Computes Cx [p] = 1 / (float) Ax [p] for all p (MINV applied to FP32 cast
// from UINT32), parallelized with a static OpenMP schedule.
GrB_Info GB_unop__minv_fp32_uint32
(
    float *Cx,              // Cx and Ax may be aliased
    uint32_t *Ax,
    int64_t anz,            // number of entries to process
    int nthreads            // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    // Operator compiled out (see GB_DISABLE above); caller falls back to the
    // generic implementation.
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // Expands (via the GB_* macros above) to:
        //   uint32_t aij = Ax [p] ; float z = (float) aij ; Cx [p] = (1.0F)/z ;
        // Safe under aliasing: each iteration touches only index p.
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = minv(A') for FP32 output / UINT32 input: transpose A, typecast each
// entry to float, apply the reciprocal. The loop structure lives in the
// shared template GB_unaryop_transpose.c, specialized here via the GB_*
// macros defined above and included directly into the function body.
GrB_Info GB_tran__minv_fp32_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,    // presumably phase-1 per-slice row
                                        // counts — see GB_unaryop_transpose.c
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice                         // number of slices of A
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
vmul.c | //
// vmul.c : Demo of multi-target multi-source OpenMP offload
//
#include <stdio.h>
// Element-wise vector multiply: c[i] = a[i] * b[i] for i in [0, N).
// a and b are mapped to the target device, c is mapped back; the single
// combined construct is equivalent to the separate target-teams +
// distribute-parallel-for pair.
void vmul(int *a, int *b, int *c, int N) {
#pragma omp target teams distribute parallel for map(to: a[0:N], b[0:N]) map(from: c[0:N])
    for (int idx = 0; idx < N; ++idx) {
        c[idx] = a[idx] * b[idx];
    }
}
|
utils.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file utils.h
* \brief Basic utilility functions.
*/
#ifndef MXNET_COMMON_UTILS_H_
#define MXNET_COMMON_UTILS_H_
#include <dmlc/logging.h>
#include <dmlc/omp.h>
#include <nnvm/graph.h>
#include <nnvm/node.h>
#include <mxnet/engine.h>
#include <mxnet/ndarray.h>
#include <mxnet/op_attr_types.h>
#include <mxnet/graph_attr_types.h>
#include <nnvm/graph_attr_types.h>
#include <algorithm>
#include <cstring>
#include <functional>
#include <limits>
#include <memory>
#include <random>
#include <sstream>
#include <string>
#include <thread>
#include <type_traits>
#include <unordered_set>
#include <utility>
#include <vector>
#include "../operator/mxnet_op.h"
#if MXNET_USE_MKLDNN == 1
#include "../operator/nn/mkldnn/mkldnn_base-inl.h"
#endif
#if defined(_WIN32) || defined(_WIN64) || defined(__WINDOWS__)
#include <windows.h>
#else
#include <unistd.h>
#endif
namespace mxnet {
namespace common {
/*! \brief Return the OS process id of the current process (platform-specific). */
#if defined(_WIN32) || defined(_WIN64) || defined(__WINDOWS__)
inline size_t current_process_id() { return ::GetCurrentProcessId(); }
#else
inline size_t current_process_id() { return getpid(); }
#endif
/*!
 * \brief Kernel verifying a CSR indptr array: entries must be non-negative,
 * non-decreasing, start at 0, and end at the number of stored indices.
 * Writes kCSRIndPtrErr to *out when a violation is seen at position i.
 */
struct csr_indptr_check {
  template<typename DType, typename IType>
  MSHADOW_XINLINE static void Map(int i, DType* out, const IType* indptr,
                                  const nnvm::dim_t end, const nnvm::dim_t idx_size) {
    const bool negative_or_decreasing = indptr[i+1] < 0 || indptr[i+1] < indptr[i];
    const bool bad_front = (i == 0) && indptr[i] != 0;
    const bool bad_back  = (i == end - 1) && indptr[end] != idx_size;
    if (negative_or_decreasing || bad_front || bad_back) {
      *out = kCSRIndPtrErr;
    }
  }
};
/*!
 * \brief Kernel verifying the CSR column indices of row i: each index must be
 * non-negative, below ncols, and strictly ascending within the row.
 * Writes kCSRIdxErr to *out at the first violation and stops scanning the row.
 */
struct csr_idx_check {
  template<typename DType, typename IType, typename RType>
  MSHADOW_XINLINE static void Map(int i, DType* out, const IType* idx,
                                  const RType* indptr, const nnvm::dim_t ncols) {
    const RType row_begin = indptr[i];
    const RType row_end = indptr[i+1];
    for (RType j = row_begin; j < row_end; j++) {
      const bool out_of_range  = idx[j] >= ncols || idx[j] < 0;
      const bool not_ascending = (j < row_end - 1) && idx[j] >= idx[j+1];
      if (out_of_range || not_ascending) {
        *out = kCSRIdxErr;
        break;
      }
    }
  }
};
/*!
 * \brief Kernel verifying row-sparse indices: each must be non-negative,
 * below nrows, and strictly ascending. Writes kRSPIdxErr to *out on a
 * violation at position i. `end` is the last valid position, so idx[i+1]
 * is only read while i < end.
 */
struct rsp_idx_check {
  template<typename DType, typename IType>
  MSHADOW_XINLINE static void Map(int i, DType* out, const IType* idx,
                                  const nnvm::dim_t end, const nnvm::dim_t nrows) {
    const bool not_ascending = (i < end) && idx[i+1] <= idx[i];
    const bool out_of_range  = idx[i] < 0 || idx[i] >= nrows;
    if (not_ascending || out_of_range) {
      *out = kRSPIdxErr;
    }
  }
};
template<typename xpu>
void CheckFormatWrapper(const RunContext &rctx, const NDArray &input,
const TBlob &err_cpu, const bool full_check);
/*!
 * \brief Check the validity of CSRNDArray.
 * \param rctx Execution context.
 * \param input Input NDArray of CSRStorage.
 * \param err_cpu Error number on cpu; left at kNormalErr when the array is
 *        valid, otherwise set to the first error code detected.
 * \param full_check If true, rigorous check, O(N) operations,
 *        otherwise basic check, O(1) operations.
 */
template<typename xpu>
void CheckFormatCSRImpl(const RunContext &rctx, const NDArray &input,
                        const TBlob &err_cpu, const bool full_check) {
  using namespace op::mxnet_op;
  CHECK_EQ(input.storage_type(), kCSRStorage)
    << "CheckFormatCSRImpl is for CSRNDArray";
  const mxnet::TShape shape = input.shape();
  const mxnet::TShape idx_shape = input.aux_shape(csr::kIdx);
  const mxnet::TShape indptr_shape = input.aux_shape(csr::kIndPtr);
  const mxnet::TShape storage_shape = input.storage_shape();
  // O(1) structural checks: 2-D matrix, 1-D aux arrays, indptr of length
  // rows+1, and one column index per stored value.
  if ((shape.ndim() != 2) ||
      (idx_shape.ndim() != 1 || indptr_shape.ndim() != 1 || storage_shape.ndim() != 1) ||
      (indptr_shape[0] != shape[0] + 1) ||
      (idx_shape[0] != storage_shape[0])) {
    MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
      DType* err = err_cpu.dptr<DType>();
      *err = kCSRShapeErr;
    });
    return;
  }
  if (full_check) {
    MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
      MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIndPtr), RType, {
        MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIdx), IType, {
          mshadow::Stream<xpu> *s = rctx.get_stream<xpu>();
          // Single-element scratch array on the target device holding the
          // error flag, initialized to kNormalErr.
          NDArray ret_xpu = NDArray(mshadow::Shape1(1),
                                    rctx.get_ctx(), false, err_cpu.type_flag_);
          TBlob val_xpu = ret_xpu.data();
          Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>());
          // One kernel thread per row validates indptr (see csr_indptr_check).
          Kernel<csr_indptr_check, xpu>::Launch(s, indptr_shape[0] - 1, val_xpu.dptr<DType>(),
            input.aux_data(csr::kIndPtr).dptr<RType>(),
            indptr_shape[0] - 1, idx_shape[0]);
          // no need to check indices if indices are empty
          if (idx_shape[0] != 0) {
            // One kernel thread per row validates that row's column indices.
            Kernel<csr_idx_check, xpu>::Launch(s, indptr_shape[0] - 1, val_xpu.dptr<DType>(),
              input.aux_data(csr::kIdx).dptr<IType>(),
              input.aux_data(csr::kIndPtr).dptr<RType>(), shape[1]);
          }
          // Copy the device-side error flag back to the CPU-side output.
          mshadow::Copy(err_cpu.get<cpu, 1, DType>(),
                        val_xpu.get<xpu, 1, DType>(s), s);
        });
      });
    });
  }
}
/*!
 * \brief Check the validity of RowSparseNDArray.
 * \param rctx Execution context.
 * \param input Input NDArray of RowSparseStorage.
 * \param err_cpu Error number on cpu; left at kNormalErr when the array is
 *        valid, otherwise set to the error code detected.
 * \param full_check If true, rigorous check, O(N) operations,
 *        otherwise basic check, O(1) operations.
 */
template<typename xpu>
void CheckFormatRSPImpl(const RunContext &rctx, const NDArray &input,
                        const TBlob &err_cpu, const bool full_check) {
  using namespace op::mxnet_op;
  CHECK_EQ(input.storage_type(), kRowSparseStorage)
    << "CheckFormatRSPImpl is for RSPNDArray";
  const mxnet::TShape idx_shape = input.aux_shape(rowsparse::kIdx);
  // O(1) check: one row index per stored row.
  if (idx_shape[0] != input.storage_shape()[0]) {
    MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
      DType* err = err_cpu.dptr<DType>();
      *err = kRSPShapeErr;
    });
    return;
  }
  // No stored rows: nothing further to validate.
  if (idx_shape[0] == 0) {
    return;
  }
  if (full_check) {
    MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
      MSHADOW_IDX_TYPE_SWITCH(input.aux_type(rowsparse::kIdx), IType, {
        mshadow::Stream<xpu> *s = rctx.get_stream<xpu>();
        // Single-element scratch array on the target device holding the
        // error flag, initialized to kNormalErr.
        NDArray ret_xpu = NDArray(mshadow::Shape1(1),
                                  rctx.get_ctx(), false, err_cpu.type_flag_);
        TBlob val_xpu = ret_xpu.data();
        Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>());
        // One kernel thread per stored index; the kernel only reads idx[i+1]
        // while i < idx_shape[0] - 1 (see rsp_idx_check).
        Kernel<rsp_idx_check, xpu>::Launch(s, idx_shape[0],
          val_xpu.dptr<DType>(), input.aux_data(rowsparse::kIdx).dptr<IType>(),
          idx_shape[0] - 1, input.shape()[0]);
        // Copy the device-side error flag back to the CPU-side output.
        mshadow::Copy(err_cpu.get<cpu, 1, DType>(),
                      val_xpu.get<xpu, 1, DType>(s), s);
      });
    });
  }
}
/*! \brief Dispatch sparse-format validation by the input's storage type.
 * Dense (default) storage needs no validation; unknown types are fatal. */
template<typename xpu>
void CheckFormatImpl(const RunContext &rctx, const NDArray &input,
                     const TBlob &err_cpu, const bool full_check) {
  const int stype = input.storage_type();
  switch (stype) {
    case kCSRStorage:
      CheckFormatCSRImpl<xpu>(rctx, input, err_cpu, full_check);
      break;
    case kRowSparseStorage:
      CheckFormatRSPImpl<xpu>(rctx, input, err_cpu, full_check);
      break;
    case kDefaultStorage:
      break;  // no-op for default storage
    default:
      LOG(FATAL) << "Unknown storage type " << stype;
  }
}
/*! \brief Pick rows specified by user input index array from a row sparse ndarray
* and save them in the output sparse ndarray.
*/
template<typename xpu>
void SparseRetainOpForwardRspWrapper(mshadow::Stream<xpu> *s,
const NDArray& input_nd,
const TBlob& idx_data,
const OpReqType req,
NDArray* output_nd);
/* \brief Casts tensor storage type to the new type.
*/
template<typename xpu>
void CastStorageDispatch(const OpContext& ctx, const NDArray& input, const NDArray& output);
/*! \brief returns true if all storage types in `vstorage` are the same as target `stype`.
 * false is returned for empty inputs.
 */
inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage,
                                const NDArrayStorageType stype) {
  // Empty input is deliberately "false", not vacuously true.
  if (vstorage.empty()) {
    return false;
  }
  for (const auto& elem : vstorage) {
    if (elem != stype) {
      return false;
    }
  }
  return true;
}
/*! \brief returns true if all storage types in `vstorage` are the same as target `stype1`
 * or `stype2'. Sets *has_both when both appear. false is returned for empty inputs.
 */
inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage,
                                const NDArrayStorageType stype1,
                                const NDArrayStorageType stype2,
                                bool *has_both) {
  if (has_both) {
    *has_both = false;
  }
  // Empty input is deliberately "false", not vacuously true.
  if (vstorage.empty()) {
    return false;
  }
  bool seen_first = false;
  bool seen_second = false;
  for (const auto elem : vstorage) {
    if (elem == stype1) {
      seen_first = true;
    } else if (elem == stype2) {
      seen_second = true;
    } else {
      return false;  // some other storage type present
    }
  }
  if (has_both) {
    *has_both = seen_first && seen_second;
  }
  return true;
}
/*! \brief returns true if the storage types of arrays in `ndarrays`
 * are the same as target `stype`. false is returned for empty inputs.
 */
inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays,
                                const NDArrayStorageType stype) {
  // Empty input is deliberately "false", not vacuously true.
  if (ndarrays.empty()) {
    return false;
  }
  for (const auto& nd : ndarrays) {
    if (nd.storage_type() != stype) {
      return false;
    }
  }
  return true;
}
/*! \brief returns true if the storage types of arrays in `ndarrays`
 * are the same as targets `stype1` or `stype2`. Sets *has_both when both
 * appear. false is returned for empty inputs.
 */
inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays,
                                const NDArrayStorageType stype1,
                                const NDArrayStorageType stype2,
                                bool *has_both) {
  if (has_both) {
    *has_both = false;
  }
  // Empty input is deliberately "false", not vacuously true.
  if (ndarrays.empty()) {
    return false;
  }
  bool seen_first = false;
  bool seen_second = false;
  for (const auto& nd : ndarrays) {
    const NDArrayStorageType stype = nd.storage_type();
    if (stype == stype1) {
      seen_first = true;
    } else if (stype == stype2) {
      seen_second = true;
    } else {
      return false;  // some other storage type present
    }
  }
  if (has_both) {
    *has_both = seen_first && seen_second;
  }
  return true;
}
/*! \brief returns true if storage type of any array in `ndarrays`
 * is the same as the target `stype`. false is returned for empty inputs.
 */
inline bool ContainsStorageType(const std::vector<NDArray>& ndarrays,
                                const NDArrayStorageType stype) {
  for (const auto& nd : ndarrays) {
    if (nd.storage_type() == stype) {
      return true;
    }
  }
  // Reached for empty input too: no match found.
  return false;
}
/*! \brief returns true if any storage type `ndstype` in `ndstypes`
 * is the same as the target `stype`. false is returned for empty inputs.
 */
inline bool ContainsStorageType(const std::vector<int>& ndstypes,
                                const NDArrayStorageType stype) {
  for (const int ndstype : ndstypes) {
    if (ndstype == stype) {
      return true;
    }
  }
  // Reached for empty input too: no match found.
  return false;
}
/*! \brief Human-readable name for a DispatchMode value ("unknown" as fallback). */
inline std::string dispatch_mode_string(const DispatchMode x) {
  switch (x) {
    case DispatchMode::kUndefined:
      return "undefined";
    case DispatchMode::kVariable:
      return "variable";
    case DispatchMode::kFCompute:
      return "fcompute";
    case DispatchMode::kFComputeEx:
      return "fcompute_ex";
    case DispatchMode::kFComputeFallback:
      return "fcompute_fallback";
  }
  return "unknown";
}
/*! \brief Human-readable name for a storage type ("unknown" as fallback). */
inline std::string stype_string(const int x) {
  switch (x) {
    case kDefaultStorage:
      return "default";
    case kCSRStorage:
      return "csr";
    case kRowSparseStorage:
      return "row_sparse";
    default:
      return "unknown";
  }
}
/*! \brief Human-readable name for a device type ("unknown" as fallback). */
inline std::string dev_type_string(const int dev_type) {
  switch (dev_type) {
    case Context::kCPU:
      return "cpu";
    case Context::kGPU:
      return "gpu";
    case Context::kCPUPinned:
      return "cpu_pinned";
    case Context::kCPUShared:
      return "cpu_shared";
    default:
      return "unknown";
  }
}
/*! \brief Render an operator's name, input/output storage types, parameters
 * and device context as a multi-line diagnostic string. */
inline std::string operator_stype_string(const nnvm::NodeAttrs& attrs,
                                         const int dev_mask,
                                         const std::vector<int>& in_attrs,
                                         const std::vector<int>& out_attrs) {
  std::ostringstream os;
  // Shared helper emitting "stype, stype, " (trailing separator kept, as before).
  auto write_stypes = [&os](const std::vector<int>& stypes) {
    for (const int attr : stypes) {
      os << stype_string(attr) << ", ";
    }
  };
  os << "operator = " << attrs.op->name
     << "\ninput storage types = [";
  write_stypes(in_attrs);
  os << "]\n"
     << "output storage types = [";
  write_stypes(out_attrs);
  os << "]\n"
     << "params = {";
  for (const auto& kv : attrs.dict) {
    os << "\"" << kv.first << "\" : " << kv.second << ", ";
  }
  os << "}\n"
     << "context.dev_mask = " << dev_type_string(dev_mask);
  return os.str();
}
/*! \brief Render an operator invocation (name, in/out storage types, params,
 * device) as a diagnostic string. `req` is accepted for signature
 * compatibility but not rendered. */
inline std::string operator_string(const nnvm::NodeAttrs& attrs,
                                   const OpContext& ctx,
                                   const std::vector<NDArray>& inputs,
                                   const std::vector<OpReqType>& req,
                                   const std::vector<NDArray>& outputs) {
  std::vector<int> in_stypes;
  std::vector<int> out_stypes;
  in_stypes.reserve(inputs.size());
  out_stypes.reserve(outputs.size());
  // Fix: take the NDArray by const reference — the previous by-value
  // parameter copied every array handle just to read its storage type.
  auto xform = [](const NDArray& arr) -> int { return arr.storage_type(); };
  std::transform(inputs.begin(), inputs.end(), std::back_inserter(in_stypes), xform);
  std::transform(outputs.begin(), outputs.end(), std::back_inserter(out_stypes), xform);
  return operator_stype_string(attrs, ctx.run_ctx.ctx.dev_mask(), in_stypes, out_stypes);
}
/*! \brief Log an INFO message at most once, deduplicated by exact message
 * text. Intended for storage fallback warning messages.
 * NOTE(review): uses std::unordered_set but this header does not include
 * <unordered_set> directly — it is only available transitively; confirm and
 * add the include.
 */
inline void LogOnce(const std::string& message) {
  // Store is thread-local (per ThreadLocalStore), so the "once" guarantee
  // is per thread rather than per process.
  typedef dmlc::ThreadLocalStore<std::unordered_set<std::string>> LogStore;
  auto log_store = LogStore::Get();
  if (log_store->find(message) == log_store->end()) {
    LOG(INFO) << message;
    log_store->insert(message);
  }
}
/*! \brief Log a storage-fallback event (deduplicated via LogOnce).
 * \param attrs operator attributes used to describe the operator
 * \param dev_mask device mask of the execution context
 * \param in_attrs input storage types (must be non-null)
 * \param out_attrs output storage types (must be non-null)
 * Suppressible by setting MXNET_STORAGE_FALLBACK_LOG_VERBOSE=0.
 */
inline void LogStorageFallback(const nnvm::NodeAttrs& attrs,
                               const int dev_mask,
                               const std::vector<int>* in_attrs,
                               const std::vector<int>* out_attrs) {
  // Env var is read once per process and cached.
  static bool log = dmlc::GetEnv("MXNET_STORAGE_FALLBACK_LOG_VERBOSE", true);
  if (!log) return;
  const std::string op_str = operator_stype_string(attrs, dev_mask, *in_attrs, *out_attrs);
  std::ostringstream os;
  const char* warning = "\nThe operator with default storage type will be dispatched "
    "for execution. You're seeing this warning message because the operator above is unable "
    "to process the given ndarrays with specified storage types, context and parameter. "
    "Temporary dense ndarrays are generated in order to execute the operator. "
    "This does not affect the correctness of the programme. "
    "You can set environment variable MXNET_STORAGE_FALLBACK_LOG_VERBOSE to "
    "0 to suppress this warning.";
  os << "\nStorage type fallback detected:\n" << op_str << warning;
  LogOnce(os.str());
#if MXNET_USE_MKLDNN == 1
  if (!MKLDNNEnvSet()) common::LogOnce("MXNET_MKLDNN_ENABLED flag is off. "
                                       "You can re-enable by setting MXNET_MKLDNN_ENABLED=1");
  // Fix: added the space after "is set." — the adjacent string literals
  // previously fused into "...is set.Should only be set...".
  if (GetMKLDNNCacheSize() != -1) common::LogOnce("MXNET_MKLDNN_CACHE_NUM is set. "
                                                  "Should only be set if "
                                                  "your model has variable input shapes, "
                                                  "as cache size may grow unbounded");
#endif
}
// Heuristic to determine the number of worker threads per GPU.
inline int GetNumThreadsPerGPU() {
  // Small default keeps resource usage modest; override with the
  // MXNET_GPU_WORKER_NTHREADS environment variable.
  const int kDefaultWorkers = 2;
  return dmlc::GetEnv("MXNET_GPU_WORKER_NTHREADS", kDefaultWorkers);
}
// Heuristic for the number of matching colors, which decides how much
// parallelism we can get in each GPU.
inline int GetExecNumMatchColor() {
  // Overridable via MXNET_EXEC_NUM_TEMP, but never exceeds the per-GPU
  // worker thread count (resource-efficient option).
  const int requested = dmlc::GetEnv("MXNET_EXEC_NUM_TEMP", 1);
  return std::min(requested, GetNumThreadsPerGPU());
}
/*! \brief Sum the n elements of `a` into `start` using an OpenMP
 * `reduction(+)` loop; returns the accumulated total. */
template<typename T, typename V>
V ParallelAccumulate(const T* a, const int n, V start) {
  V total = start;
#pragma omp parallel for reduction(+:total)
  for (int idx = 0; idx < n; ++idx) {
    total += a[idx];
  }
  return total;
}
/*!
 * \brief
 * Recursive worker for ParallelSort — DO NOT call directly; use ParallelSort.
 * Ranges shorter than `grainsize` are sorted serially; longer ranges are
 * split in half, the first half sorted on a spawned thread while this thread
 * sorts the second, then the halves are merged.
 * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
 */
template<typename RandomIt, typename Compare>
void ParallelSortHelper(RandomIt first, size_t len,
                        size_t grainsize, const Compare& comp) {
  if (len < grainsize) {
    std::sort(first, first + len, comp);
    return;
  }
  const size_t half = len / 2;
  std::thread worker(ParallelSortHelper<RandomIt, Compare>, first, half, grainsize, comp);
  ParallelSortHelper(first + half, len - half, grainsize, comp);
  worker.join();
  std::inplace_merge(first, first + half, first + len, comp);
}
/*!
 * \brief
 * Sort the elements in the range [first, last) into the ascending order defined by
 * the comparator comp.
 * If the length of the range [first, last) is greater than a certain threshold,
 * the range will be recursively divided into two and assign two threads
 * to sort each half range.
 * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
 * \param num_threads thread-count hint; 0 is now treated as 1 (it previously
 *        caused a division by zero when computing the grain size).
 */
template<typename RandomIt, typename Compare>
void ParallelSort(RandomIt first, RandomIt last, size_t num_threads, Compare comp) {
  const size_t num = static_cast<size_t>(std::distance(first, last));
  // Guard against num_threads == 0: fall back to a single-threaded grain.
  const size_t threads = std::max(num_threads, static_cast<size_t>(1));
  const size_t grainsize = std::max(num / threads + 5, static_cast<size_t>(1024 * 16));
  ParallelSortHelper(first, num, grainsize, comp);
}
/*!
 * \brief
 * Sort the elements in the range [first, last) into ascending order using the
 * default < operator — convenience overload forwarding to the
 * comparator-taking ParallelSort.
 * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
 */
template<typename RandomIt>
void ParallelSort(RandomIt first, RandomIt last, size_t num_threads) {
  using value_type = typename std::iterator_traits<RandomIt>::value_type;
  ParallelSort(first, last, num_threads, std::less<value_type>());
}
/*!
 * \brief Random Engine: the PRNG type used throughout (Mersenne Twister).
 */
typedef std::mt19937 RANDOM_ENGINE;
/*!
 * \brief Helper functions.
 */
namespace helper {
/*!
 * \brief Overload-selection helper for MakeUnique: primary template matches
 * any non-array type `T`.
 */
template <class T>
struct UniqueIf {
  /*!
   * \brief unique_ptr owning a single object of type `T`.
   */
  using SingleObject = std::unique_ptr<T>;
};
/*!
 * \brief Specialization for an array of unknown bound `T[]`.
 */
template <class T>
struct UniqueIf<T[]> {
  /*!
   * \brief unique_ptr owning an array of `T` of unknown bound.
   */
  using UnknownBound = std::unique_ptr<T[]>;
};
/*!
 * \brief Specialization for an array of known bound `T[kSize]`; maps to void
 * so the corresponding MakeUnique overload can be declared deleted.
 */
template <class T, size_t kSize>
struct UniqueIf<T[kSize]> {
  /*!
   * \brief void — known-bound arrays are intentionally unsupported.
   */
  using KnownBound = void;
};
}  // namespace helper
/*!
 * \brief Constructs an object of type `T` and wraps it in a
 * `std``::``unique_ptr` (pre-C++14 stand-in for std::make_unique).
 * \param args List of arguments with which an instance of `T` will be
 * constructed.
 * \return `std``::``unique_ptr` of an instance of type `T`.
 *
 * Constructs a non-array type `T`. The arguments `args` are passed to the
 * constructor of `T`. The function does not participate in the overload
 * resolution if `T` is an array type.
 */
template <class T, class... Args>
typename helper::UniqueIf<T>::SingleObject MakeUnique(Args&&... args) {
  return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}
/*!
 * \brief Constructs a value-initialized array of type `T` and wraps it in a
 * `std``::``unique_ptr`.
 * \param n The size of the array to construct.
 * \return `std``::``unique_ptr` of an instance of type `T`.
 *
 * Constructs an array of unknown bound `T`. The function does not participate
 * in the overload resolution unless `T` is an array of unknown bound.
 */
template <class T>
typename helper::UniqueIf<T>::UnknownBound MakeUnique(size_t n) {
  using U = typename std::remove_extent<T>::type;
  // The {} value-initializes every element.
  return std::unique_ptr<T>(new U[n]{});
}
/*!
 * \brief Deleted overload: constructing an array of known bound (e.g.
 * `MakeUnique<int[5]>()`) is disallowed.
 * \param args List of arguments with which an instance of `T` would be
 * constructed.
 */
template <class T, class... Args>
typename helper::UniqueIf<T>::KnownBound MakeUnique(Args&&... args) = delete;
/*! \brief Look up the registered compute function of kind `name` for `op`
 * on the device of `ctx`; returns nullptr when none is registered.
 * Unknown device masks are fatal. */
template<typename FCompType>
FCompType GetFCompute(const nnvm::Op* op, const std::string& name,
                      const Context& ctx) {
  // Registries are resolved once per (FCompType, name) instantiation.
  static auto& cpu_registry = nnvm::Op::GetAttr<FCompType>(name + "<cpu>");
  static auto& gpu_registry = nnvm::Op::GetAttr<FCompType>(name + "<gpu>");
  const int mask = ctx.dev_mask();
  if (mask == cpu::kDevMask) {
    return cpu_registry.get(op, nullptr);
  }
  if (mask == gpu::kDevMask) {
    return gpu_registry.get(op, nullptr);
  }
  LOG(FATAL) << "Unknown device mask " << mask;
  return nullptr;
}
/*!
 * \brief Return the max integer value representable in the type `T` without loss of precision.
 */
template <typename T>
constexpr size_t MaxIntegerValue() {
  // Integral T: the type's own maximum. Floating-point T: 2^digits, the
  // largest contiguous exactly-representable integer, where `digits` is the
  // mantissa width (written as 2 << (digits - 1) to compute it in size_t).
  return std::is_integral<T>::value ?
    std::numeric_limits<T>::max():
    size_t(2) << (std::numeric_limits<T>::digits - 1);
}
// Specialization for mshadow's half type: hard-codes 2^11 (= 2048), the
// fp16 mantissa width of 11 bits (10 stored + 1 implicit) — presumably
// because numeric_limits is not specialized for half_t; confirm.
template <>
constexpr size_t MaxIntegerValue<mshadow::half::half_t>() {
  return size_t(2) << 10;
}
// Bit-width helper: returns floor(log2(a)) + 1 for a > 0 (the number of
// significant bits), and 1 for a == 0. Note: despite the name, this is
// ilog2 + 1, not ilog2 — e.g. ilog2ul(1) == 1, ilog2ul(8) == 4.
MSHADOW_XINLINE int ilog2ul(size_t a) {
  int k = 1;
  while (a >>= 1) ++k;
  return k;
}
// Same bit-width computation for unsigned int arguments.
MSHADOW_XINLINE int ilog2ui(unsigned int a) {
  int k = 1;
  while (a >>= 1) ++k;
  return k;
}
/*!
 * \brief Return an NDArray of all zeros with the given storage type, shape,
 * context and dtype. Dense arrays are allocated and zero-filled eagerly;
 * non-default (sparse) storage delays allocation.
 */
inline NDArray InitZeros(const NDArrayStorageType stype, const mxnet::TShape &shape,
                         const Context &ctx, const int dtype) {
  if (stype != kDefaultStorage) {
    // NDArray with non-default storage. Storage allocation is always delayed.
    return NDArray(stype, shape, ctx, true, dtype);
  }
  // Dense storage: allocate now and zero-fill.
  NDArray zeros(shape, ctx, false, dtype);
  zeros = 0;
  return zeros;
}
/*!
 * \brief Append an all-zeros NDArray to *vec. Dense arrays are allocated and
 * zero-filled eagerly; non-default (sparse) storage delays allocation.
 */
inline void EmplaceBackZeros(const NDArrayStorageType stype, const mxnet::TShape &shape,
                             const Context &ctx, const int dtype,
                             std::vector<NDArray> *vec) {
  if (stype != kDefaultStorage) {
    // NDArray with non-default storage. Storage allocation is always delayed.
    vec->emplace_back(stype, shape, ctx, true, dtype);
    return;
  }
  // Dense storage: construct in place, then zero-fill.
  vec->emplace_back(shape, ctx, false, dtype);
  vec->back() = 0;
}
/*!
 * \brief Copy `size` elements from src to dst; large copies use an OpenMP
 * loop, small ones a plain memcpy.
 */
template<typename DType>
inline void ParallelCopy(DType* dst, const DType* src, index_t size) {
  // Threshold below which threading overhead outweighs the gain;
  // tunable via the MXNET_CPU_PARALLEL_COPY_SIZE environment variable.
  static index_t copy_block_size = dmlc::GetEnv("MXNET_CPU_PARALLEL_COPY_SIZE", 200000);
  if (size < copy_block_size) {
    std::memcpy(dst, src, sizeof(DType) * size);
    return;
  }
  #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
  for (index_t i = 0; i < size; ++i) {
    dst[i] = src[i];
  }
}
/*!
 * \brief Convert a legacy-convention shape to the numpy convention in place.
 *
 * Legacy convention: ndim == 0 means the whole shape is unknown, and a dim
 * size of 0 means that dimension's size is unknown.
 * Numpy convention: ndim == -1 means the shape is unknown, ndim == 0 is a
 * scalar tensor, dim size -1 means unknown, dim size 0 means no elements.
 * This lets operators' infer-shape functions work in the backend.
 * \param shape shape to be converted.
 * Note: the input may already be numpy-compatible (e.g. when a subgraph
 * operator's infer-shape is called from the whole-graph pass, its
 * input/output shapes were converted earlier); such shapes pass through
 * unchanged.
 */
inline void ConvertToNumpyShape(mxnet::TShape* shape) {
  if (shape->ndim() == 0) {
    // Legacy "completely unknown" marker becomes the ndim == -1 sentinel.
    *shape = mxnet::TShape();
  } else {
    for (int axis = 0; axis < shape->ndim(); ++axis) {
      if ((*shape)[axis] == 0) {
        (*shape)[axis] = -1;  // unknown dimension size
      }
    }
  }
}
/*! \brief Convert every shape in the vector to the numpy convention in place. */
inline void ConvertToNumpyShape(mxnet::ShapeVector* shapes) {
  for (auto& shape : *shapes) {
    ConvertToNumpyShape(&shape);
  }
}
/*!
 * \brief Convert a shape returned by the infer-shape functions/pass back to
 * the legacy convention in place: an unknown shape becomes ndim == 0, and
 * any unknown dimension size becomes 0.
 */
inline void ConvertToLegacyShape(mxnet::TShape* shape) {
  if (!mxnet::ndim_is_known(*shape)) {
    *shape = mxnet::TShape(0, -1);  // legacy "completely unknown" marker
    return;
  }
  for (int axis = 0; axis < shape->ndim(); ++axis) {
    if (!mxnet::dim_size_is_known(*shape, axis)) {
      (*shape)[axis] = 0;  // legacy unknown-dim marker
    }
  }
}
/*! \brief Convert every shape in the vector to the legacy convention in place. */
inline void ConvertToLegacyShape(mxnet::ShapeVector* shapes) {
  for (auto& shape : *shapes) {
    ConvertToLegacyShape(&shape);
  }
}
void ExecuteMonInputCallback(
const nnvm::IndexedGraph &idx, const std::vector<NDArray *> &state_arrays,
size_t nid, const std::function<void(const char *, const char *, void *)>
&monitor_callback);
void ExecuteMonOutputCallback(
const nnvm::IndexedGraph &idx, const std::vector<NDArray *> &state_arrays,
size_t nid, const std::function<void(const char *, const char *, void *)>
&monitor_callback);
/*!
 * \brief Return the output name of a NodeEntry.
 */
static inline std::string GetOutputName(const nnvm::NodeEntry& e) {
  // Wrap the entry in a throwaway Symbol so ListOutputNames() does the naming.
  nnvm::Symbol sym;
  sym.outputs.push_back(e);
  return sym.ListOutputNames().front();
}
/*! \brief Return a copy of `src` with negative axis indices converted to
 * their non-negative equivalents. Each entry must land in [0, src.ndim())
 * after adjustment (note: the bound is the number of axes given, which
 * assumes a full permutation), otherwise a fatal CHECK fires. */
inline mxnet::TShape CanonicalizeAxes(const mxnet::TShape& src) {
  const int ndim = src.ndim();
  mxnet::TShape canonical = src;
  for (int i = 0; i < ndim; ++i) {
    if (canonical[i] < 0) {
      canonical[i] += ndim;  // e.g. -1 -> ndim - 1
    }
    // Reports the already-adjusted value, matching the original behavior.
    CHECK(canonical[i] >= 0 && canonical[i] < ndim) << "axes[" << i << "]="
                                                    << canonical[i] << " exceeds the range ["
                                                    << 0 << ", " << ndim << ")";
  }
  return canonical;
}
/*! \brief True when `dtype` is one of the floating-point type flags. */
inline bool is_float(const int dtype) {
  switch (dtype) {
    case mshadow::kFloat16:
    case mshadow::kFloat32:
    case mshadow::kFloat64:
      return true;
    default:
      return false;
  }
}
} // namespace common
} // namespace mxnet
#endif // MXNET_COMMON_UTILS_H_
|
phonopy.c | /* Copyright (C) 2021 Atsushi Togo */
/* All rights reserved. */
/* This file is part of phonopy. */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* * Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* * Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in */
/* the documentation and/or other materials provided with the */
/* distribution. */
/* * Neither the name of the phonopy project nor the names of its */
/* contributors may be used to endorse or promote products derived */
/* from this software without specific prior written permission. */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <float.h>
#include "dynmat.h"
#include "derivative_dynmat.h"
#include "kgrid.h"
#include "phonopy.h"
#include "tetrahedron_method.h"
#define KB 8.6173382568083159E-05
static void set_index_permutation_symmetry_fc(double * fc, const int natom);
static void set_translational_symmetry_fc(double * fc, const int natom);
static void set_translational_symmetry_compact_fc(double * fc,
const int p2s[],
const int n_satom,
const int n_patom);
static double get_free_energy(const double temperature, const double f);
static double get_entropy(const double temperature, const double f);
static double get_heat_capacity(const double temperature, const double f);
/* static double get_energy(double temperature, double f); */
static void distribute_fc2(double (*fc2)[3][3],
const int * atom_list,
const int len_atom_list,
PHPYCONST double (*r_carts)[3][3],
const int * permutations,
const int * map_atoms,
const int * map_syms,
const int num_rot,
const int num_pos);
static int nint(const double a);
/* Thin C-API shim: transform a dynamical matrix back into force constants.
 * All arguments are forwarded unchanged to the dynmat backend. */
void phpy_transform_dynmat_to_fc(double *fc,
                                 const double *dm,
                                 PHPYCONST double (*comm_points)[3],
                                 PHPYCONST double (*shortest_vectors)[27][3],
                                 const int *multiplicities,
                                 const double *masses,
                                 const int *s2pp_map,
                                 const int *fc_index_map,
                                 const int num_patom,
                                 const int num_satom)
{
  dym_transform_dynmat_to_fc(fc, dm, comm_points, shortest_vectors,
                             multiplicities, masses, s2pp_map, fc_index_map,
                             num_patom, num_satom);
}
/* Build the dynamical matrix at wave vector q.
 * Returns the backend's status value.
 * Fix: the `with_openmp` argument was previously ignored — a hard-coded 1
 * was forwarded to the backend, so callers could never select the serial
 * code path they asked for. Forward the parameter instead. */
int phpy_get_dynamical_matrix_at_q(double *dynamical_matrix,
                                   const int num_patom,
                                   const int num_satom,
                                   const double *fc,
                                   const double q[3],
                                   PHPYCONST double (*svecs)[27][3],
                                   const int *multi,
                                   const double *mass,
                                   const int *s2p_map,
                                   const int *p2s_map,
                                   PHPYCONST double (*charge_sum)[3][3],
                                   const int with_openmp)
{
  return dym_get_dynamical_matrix_at_q(dynamical_matrix, num_patom, num_satom,
                                       fc, q, svecs, multi, mass, s2p_map,
                                       p2s_map, charge_sum, with_openmp);
}
/* Thin wrapper around dym_get_charge_sum (non-analytical term correction). */
void phpy_get_charge_sum(double (*charge_sum)[3][3],
                         const int num_patom,
                         const double factor, /* 4pi/V*unit-conv and denominator */
                         const double q_cart[3],
                         PHPYCONST double (*born)[3][3])
{
  dym_get_charge_sum(charge_sum, num_patom, factor, q_cart, born);
}
/* Thin C-API shim: reciprocal-space dipole-dipole contribution.
 * Array shapes are documented on the parameters; everything is forwarded
 * unchanged to the dynmat backend. */
void phpy_get_recip_dipole_dipole(double *dd, /* [natom, 3, natom, 3, (real,imag)] */
                                  const double *dd_q0, /* [natom, 3, 3, (real,imag)] */
                                  PHPYCONST double (*G_list)[3], /* [num_G, 3] */
                                  const int num_G,
                                  const int num_patom,
                                  const double q_cart[3],
                                  const double *q_direction_cart, /* must be pointer */
                                  PHPYCONST double (*born)[3][3],
                                  PHPYCONST double dielectric[3][3],
                                  PHPYCONST double (*pos)[3], /* [num_patom, 3] */
                                  const double factor, /* 4pi/V*unit-conv */
                                  const double lambda,
                                  const double tolerance)
{
  dym_get_recip_dipole_dipole(dd, dd_q0, G_list, num_G, num_patom, q_cart,
                              q_direction_cart, born, dielectric, pos,
                              factor, lambda, tolerance);
}
/* Thin C-API shim: q=0 part of the reciprocal-space dipole-dipole term,
 * delegated unchanged to the dynmat backend. */
void phpy_get_recip_dipole_dipole_q0(double *dd_q0, /* [natom, 3, 3, (real,imag)] */
                                     PHPYCONST double (*G_list)[3], /* [num_G, 3] */
                                     const int num_G,
                                     const int num_patom,
                                     PHPYCONST double (*born)[3][3],
                                     PHPYCONST double dielectric[3][3],
                                     PHPYCONST double (*pos)[3], /* [num_patom, 3] */
                                     const double lambda,
                                     const double tolerance)
{
  dym_get_recip_dipole_dipole_q0(dd_q0, G_list, num_G, num_patom, born,
                                 dielectric, pos, lambda, tolerance);
}
/* Thin C-API shim: derivative of the dynamical matrix at q, delegated
 * unchanged to the derivative_dynmat backend. */
void phpy_get_derivative_dynmat_at_q(double *derivative_dynmat,
                                     const int num_patom,
                                     const int num_satom,
                                     const double *fc,
                                     const double *q,
                                     const double *lattice, /* column vector */
                                     const double *r,
                                     const int *multi,
                                     const double *mass,
                                     const int *s2p_map,
                                     const int *p2s_map,
                                     const double nac_factor,
                                     const double *born,
                                     const double *dielectric,
                                     const double *q_direction)
{
  ddm_get_derivative_dynmat_at_q(derivative_dynmat, num_patom, num_satom,
                                 fc, q, lattice, r, multi, mass, s2p_map,
                                 p2s_map, nac_factor, born, dielectric,
                                 q_direction);
}
/* Thin C-API shim: neighboring BZ grid points around `grid_point`,
 * delegated unchanged to the tetrahedron-method backend (dense variant). */
void phpy_get_neighboring_grid_points(size_t neighboring_grid_points[],
                                      const size_t grid_point,
                                      PHPYCONST int relative_grid_address[][3],
                                      const int num_relative_grid_address,
                                      const int mesh[3],
                                      PHPYCONST int bz_grid_address[][3],
                                      const size_t bz_map[])
{
  thm_get_dense_neighboring_grid_points(neighboring_grid_points, grid_point,
                                        relative_grid_address,
                                        num_relative_grid_address, mesh,
                                        bz_grid_address, bz_map);
}
/* Thin wrapper around thm_get_relative_grid_address: fills the 24
 * tetrahedra corner offsets for the given reciprocal lattice. */
void phpy_get_relative_grid_address(int relative_grid_address[24][4][3],
                                    PHPYCONST double reciprocal_lattice[3][3])
{
  thm_get_relative_grid_address(relative_grid_address, reciprocal_lattice);
}
/* Thin wrapper around thm_get_all_relative_grid_address (all 4 main
 * diagonal choices of the 24 tetrahedra corner offsets). */
void phpy_get_all_relative_grid_address(int relative_grid_address[4][24][4][3])
{
  thm_get_all_relative_grid_address(relative_grid_address);
}
/* Thin wrapper around thm_get_integration_weight.
 * `function` selects the weight type expected by the backend
 * (e.g. 'I' as used by phpy_tetrahedron_method_dos below). */
double phpy_get_integration_weight(const double omega,
                                   PHPYCONST double tetrahedra_omegas[24][4],
                                   const char function)
{
  return thm_get_integration_weight(omega, tetrahedra_omegas, function);
}
/* Thin C-API shim: integration weights evaluated at an array of omegas,
 * delegated unchanged to the tetrahedron-method backend. */
void phpy_get_integration_weight_at_omegas(double *integration_weights,
                                           const int num_omegas,
                                           const double *omegas,
                                           PHPYCONST double tetrahedra_omegas[24][4],
                                           const char function)
{
  thm_get_integration_weight_at_omegas(integration_weights, num_omegas,
                                       omegas, tetrahedra_omegas, function);
}
/* Gather phonon frequencies at the corners of the 24 tetrahedra around each
 * requested grid point.
 *
 * freq_tetras is written with shape [num_gp][num_band * 96], where 96 is the
 * flattened view of relative_grid_address[4][24][3] -> [96][3] (see comment
 * below). gp_ir_index maps a dense grid point to its irreducible index into
 * `frequencies` ([num_ir][num_band] layout, judging by the indexing here). */
void phpy_get_tetrahedra_frequenies(double *freq_tetras,
                                    const int mesh[3],
                                    const size_t* grid_points,
                                    PHPYCONST int (*grid_address)[3],
                                    PHPYCONST int (*relative_grid_address)[3],
                                    const size_t* gp_ir_index,
                                    const double *frequencies,
                                    const size_t num_band,
                                    const size_t num_gp)
{
  int is_shift[3] = {0, 0, 0};
  size_t i, j, k, gp;
  int g_addr[3];
  int address_double[3];

  /* relative_grid_address[4, 24, 3] is viewed as [96, 3]. */
  for (i = 0; i < num_gp; i++) {
    /* j enumerates (band, corner) pairs: corner = j % 96, band = j / 96. */
#pragma omp parallel for private(k, g_addr, gp, address_double)
    for (j = 0; j < num_band * 96; j++) {
      /* Absolute grid address of this tetrahedron corner. */
      for (k = 0; k < 3; k++) {
        g_addr[k] = grid_address[grid_points[i]][k] +
          relative_grid_address[j % 96][k];
      }
      /* Fold the address back onto the (double) mesh and look up the
         corresponding dense grid point. */
      kgd_get_grid_address_double_mesh(address_double,
                                       g_addr,
                                       mesh,
                                       is_shift);
      gp = kgd_get_dense_grid_point_double_mesh(address_double, mesh);
      freq_tetras[i * num_band * 96 + j] = frequencies[gp_ir_index[gp] * num_band + j / 96];
    }
  }
}
/* Tetrahedron-method (projected) density of states.
 *
 * dos is accumulated into (not zeroed here); its indexing below corresponds
 * to [num_ir_gp][num_band][num_freq_points][num_coef].
 * grid_mapping_table maps each general grid point to its irreducible
 * representative; gp2ir / ir_grid_points / weights are rebuilt from it.
 * NOTE(review): malloc results are not checked; this uses
 * kgd_get_grid_point_double_mesh while phpy_get_tetrahedra_frequenies uses
 * the dense variant — confirm both index spaces agree for this mesh. */
void phpy_tetrahedron_method_dos(double *dos,
                                 const int mesh[3],
                                 PHPYCONST int (*grid_address)[3],
                                 PHPYCONST int (*relative_grid_address)[4][3],
                                 const size_t *grid_mapping_table,
                                 const double *freq_points,
                                 const double *frequencies,
                                 const double *coef,
                                 const size_t num_freq_points,
                                 const size_t num_ir_gp,
                                 const size_t num_band,
                                 const size_t num_coef,
                                 const size_t num_gp)
{
  int is_shift[3] = {0, 0, 0};
  size_t i, j, k, l, m, q, r, count;
  size_t ir_gps[24][4];         /* ir indices of the 24x4 tetrahedra corners */
  int g_addr[3];
  double tetrahedra[24][4];     /* corner frequencies for one band */
  int address_double[3];
  size_t *gp2ir, *ir_grid_points;
  int *weights;
  double iw;

  gp2ir = NULL;
  ir_grid_points = NULL;
  weights = NULL;
  gp2ir = (size_t*)malloc(sizeof(size_t) * num_gp);
  ir_grid_points = (size_t*)malloc(sizeof(size_t) * num_ir_gp);
  weights = (int*)malloc(sizeof(int) * num_ir_gp);

  /* Rebuild the irreducible-point bookkeeping from the mapping table:
     a point mapping to itself is irreducible; every other point adds one
     to the multiplicity (weight) of its representative. */
  count = 0;
  for (i = 0; i < num_gp; i++) {
    if (grid_mapping_table[i] == i) {
      gp2ir[i] = count;
      ir_grid_points[count] = i;
      weights[count] = 1;
      count++;
    } else {
      gp2ir[i] = gp2ir[grid_mapping_table[i]];
      weights[gp2ir[i]]++;
    }
  }

  /* Sanity check: the caller's num_ir_gp must match the table. */
  if (num_ir_gp != count) {
    printf("Something is wrong!\n");
  }

#pragma omp parallel for private(j, k, l, m, q, r, iw, ir_gps, g_addr, tetrahedra, address_double)
  for (i = 0; i < num_ir_gp; i++) {
    /* set 24 tetrahedra */
    for (l = 0; l < 24; l++) {
      for (q = 0; q < 4; q++) {
        for (r = 0; r < 3; r++) {
          g_addr[r] = grid_address[ir_grid_points[i]][r] +
            relative_grid_address[l][q][r];
        }
        kgd_get_grid_address_double_mesh(address_double,
                                         g_addr,
                                         mesh,
                                         is_shift);
        ir_gps[l][q] = gp2ir[kgd_get_grid_point_double_mesh(address_double, mesh)];
      }
    }

    for (k = 0; k < num_band; k++) {
      /* Corner frequencies of band k on all 24 tetrahedra. */
      for (l = 0; l < 24; l++) {
        for (q = 0; q < 4; q++) {
          tetrahedra[l][q] = frequencies[ir_gps[l][q] * num_band + k];
        }
      }
      for (j = 0; j < num_freq_points; j++) {
        /* 'I' selects the DOS-type integration weight; scale by the
           multiplicity of this irreducible point. */
        iw = thm_get_integration_weight(freq_points[j], tetrahedra, 'I') * weights[i];
        for (m = 0; m < num_coef; m++) {
          dos[i * num_band * num_freq_points * num_coef +
              k * num_coef * num_freq_points + j * num_coef + m] +=
            iw * coef[i * num_coef * num_band + m * num_band + k];
        }
      }
    }
  }

  free(gp2ir);
  gp2ir = NULL;
  free(ir_grid_points);
  ir_grid_points = NULL;
  free(weights);
  weights = NULL;
}
/* Accumulate harmonic thermal properties over q-points and bands.
 * thermal_props is accumulated into (not zeroed) with layout
 * [num_temp][3] = (free energy, entropy, heat capacity), summed over
 * q-points with the given weights. Bands at or below cutoff_frequency
 * and non-positive temperatures contribute nothing. */
void phpy_get_thermal_properties(double *thermal_props,
                                 const double *temperatures,
                                 const double *freqs,
                                 const int *weights,
                                 const int num_temp,
                                 const int num_qpoints,
                                 const int num_bands,
                                 const double cutoff_frequency)
{
  int q, t, b;
  double freq;
  double *partial;

  /* Per-q-point scratch buffer so the parallel loop needs no reduction:
     [num_qpoints][num_temp][3]. */
  partial = (double*)malloc(sizeof(double) * num_qpoints * num_temp * 3);
  for (q = 0; q < num_qpoints * num_temp * 3; q++) {
    partial[q] = 0;
  }

#pragma omp parallel for private(t, b, freq)
  for (q = 0; q < num_qpoints; q++) {
    for (t = 0; t < num_temp; t++) {
      for (b = 0; b < num_bands; b++) {
        freq = freqs[q * num_bands + b];
        if (temperatures[t] > 0 && freq > cutoff_frequency) {
          partial[q * num_temp * 3 + t * 3] +=
            get_free_energy(temperatures[t], freq) * weights[q];
          partial[q * num_temp * 3 + t * 3 + 1] +=
            get_entropy(temperatures[t], freq) * weights[q];
          partial[q * num_temp * 3 + t * 3 + 2] +=
            get_heat_capacity(temperatures[t], freq) * weights[q];
        }
      }
    }
  }

  /* Serial reduction over q-points keeps the summation order fixed. */
  for (q = 0; q < num_qpoints; q++) {
    for (t = 0; t < num_temp * 3; t++) {
      thermal_props[t] += partial[q * num_temp * 3 + t];
    }
  }

  free(partial);
  partial = NULL;
}
/* Thin C-API shim: distribute fc2 elements by symmetry, delegated
 * unchanged to the file-local distribute_fc2 implementation below. */
void phpy_distribute_fc2(double (*fc2)[3][3],
                         const int * atom_list,
                         const int len_atom_list,
                         PHPYCONST double (*r_carts)[3][3],
                         const int * permutations,
                         const int * map_atoms,
                         const int * map_syms,
                         const int num_rot,
                         const int num_pos)
{
  distribute_fc2(fc2, atom_list, len_atom_list, r_carts, permutations,
                 map_atoms, map_syms, num_rot, num_pos);
}
/* Find the permutation mapping rot_pos onto pos under lattice translations.
 * On success rot_atom[j] = i means rot_pos[j] matches pos[i] within
 * `symprec` (Cartesian distance through lattice `lat`); returns 1.
 * Returns 0 if any position stays unmatched (rot_atom entry left -1). */
int phpy_compute_permutation(int * rot_atom,
                             PHPYCONST double lat[3][3],
                             PHPYCONST double (*pos)[3],
                             PHPYCONST double (*rot_pos)[3],
                             const int num_pos,
                             const double symprec)
{
  int i, j, a, b;
  int first_free;
  double cart, dist2;
  double frac[3];

  for (i = 0; i < num_pos; i++) {
    rot_atom[i] = -1;
  }

  /* Scan by target index i and claim the first unassigned source j that
     matches within symprec. Tracking the first unassigned slot keeps the
     scan cheap when the permutation is close to the identity (i.e. when
     rot_atom[i] - i stays small). */
  first_free = 0;
  for (i = 0; i < num_pos; i++) {
    while (rot_atom[first_free] >= 0) {
      first_free++;
    }
    for (j = first_free; j < num_pos; j++) {
      if (rot_atom[j] >= 0) {
        continue;
      }
      /* Fractional difference folded to the nearest lattice image. */
      for (a = 0; a < 3; a++) {
        frac[a] = pos[i][a] - rot_pos[j][a];
        frac[a] -= nint(frac[a]);
      }
      /* Squared Cartesian distance |L * frac|^2. */
      dist2 = 0;
      for (a = 0; a < 3; a++) {
        cart = 0;
        for (b = 0; b < 3; b++) {
          cart += lat[a][b] * frac[b];
        }
        dist2 += cart * cart;
      }
      if (sqrt(dist2) < symprec) {
        rot_atom[j] = i;
        break;
      }
    }
  }

  for (i = 0; i < num_pos; i++) {
    if (rot_atom[i] < 0) {
      return 0;
    }
  }
  return 1;
}
/* Implementation detail of get_smallest_vectors.
 * For each list of 27 candidate vectors, copy those whose length is within
 * `symprec` of the list's minimum into shortest_vectors[i] and record how
 * many were kept in multiplicity[i]. */
void phpy_copy_smallest_vectors(double (*shortest_vectors)[27][3],
                                int * multiplicity,
                                PHPYCONST double (*vector_lists)[27][3],
                                PHPYCONST double (*length_lists)[27],
                                const int num_lists,
                                const double symprec)
{
  int i, j, k, n_kept;
  double shortest;

  for (i = 0; i < num_lists; i++) {
    /* Minimum length among the 27 candidates of this list. */
    shortest = DBL_MAX;
    for (j = 0; j < 27; j++) {
      if (length_lists[i][j] < shortest) {
        shortest = length_lists[i][j];
      }
    }

    /* Keep every vector tied with the minimum within tolerance. */
    n_kept = 0;
    for (j = 0; j < 27; j++) {
      if (length_lists[i][j] - shortest <= symprec) {
        for (k = 0; k < 3; k++) {
          shortest_vectors[i][n_kept][k] = vector_lists[i][j][k];
        }
        n_kept++;
      }
    }
    multiplicity[i] = n_kept;
  }
}
/* For every (pos_to[i], pos_from[j]) pair, find the shortest connecting
 * vectors among all lattice-point images (lengths measured in the reduced
 * basis), store them in supercell coordinates in
 * smallest_vectors[i*num_pos_from + j][0..count-1], and record `count` in
 * multiplicity.
 *
 * Fixes vs. the previous version:
 *  - the output holds at most 27 vectors per pair, but the old code only
 *    detected count > 27 AFTER writing past the end of the [27][3] slot;
 *    the bound is now enforced before each write, keeping the same warning
 *    and early-break behavior without the out-of-bounds stores;
 *  - malloc results are checked. */
void phpy_set_smallest_vectors(double (*smallest_vectors)[27][3],
                               int *multiplicity,
                               PHPYCONST double (*pos_to)[3],
                               const int num_pos_to,
                               PHPYCONST double (*pos_from)[3],
                               const int num_pos_from,
                               PHPYCONST int (*lattice_points)[3],
                               const int num_lattice_points,
                               PHPYCONST double reduced_basis[3][3],
                               PHPYCONST int trans_mat[3][3],
                               const double symprec)
{
  int i, j, k, l, count;
  double length_tmp, minimum, vec_xyz;
  double *length;
  double (*vec)[3];

  length = (double*)malloc(sizeof(double) * num_lattice_points);
  vec = (double(*)[3])malloc(sizeof(double[3]) * num_lattice_points);
  if (length == NULL || vec == NULL) {
    printf("Warning (gsv_set_smallest_vectors): memory allocation failed.\n");
    free(length);
    free(vec);
    return;
  }

  for (i = 0; i < num_pos_to; i++) {
    for (j = 0; j < num_pos_from; j++) {
      /* Candidate vectors pos_to[i] - pos_from[j] + L for each lattice
         point L, with lengths measured through the reduced basis. */
      for (k = 0; k < num_lattice_points; k++) {
        length[k] = 0;
        for (l = 0; l < 3; l++) {
          vec[k][l] = pos_to[i][l] - pos_from[j][l] + lattice_points[k][l];
        }
        for (l = 0; l < 3; l++) {
          length_tmp = (reduced_basis[l][0] * vec[k][0] +
                        reduced_basis[l][1] * vec[k][1] +
                        reduced_basis[l][2] * vec[k][2]);
          length[k] += length_tmp * length_tmp;
        }
        length[k] = sqrt(length[k]);
      }

      minimum = DBL_MAX;
      for (k = 0; k < num_lattice_points; k++) {
        if (length[k] < minimum) {
          minimum = length[k];
        }
      }

      /* Copy every vector tied with the minimum, but never write more
         than the 27 slots the output provides. */
      count = 0;
      for (k = 0; k < num_lattice_points; k++) {
        if (length[k] - minimum < symprec) {
          if (count == 27) {
            count++; /* flag out-of-range for the check below */
            break;
          }
          for (l = 0; l < 3; l++) {
            /* Transform to supercell coordinates */
            vec_xyz = (trans_mat[l][0] * vec[k][0] +
                       trans_mat[l][1] * vec[k][1] +
                       trans_mat[l][2] * vec[k][2]);
            smallest_vectors[i * num_pos_from + j][count][l] = vec_xyz;
          }
          count++;
        }
      }
      if (count > 27) { /* should not be greater than 27 */
        printf("Warning (gsv_set_smallest_vectors): ");
        printf("number of shortest vectors is out of range,\n");
        break;
      } else {
        multiplicity[i * num_pos_from + j] = count;
      }
    }
  }

  free(length);
  length = NULL;
  free(vec);
  vec = NULL;
}
/* Iteratively impose permutation and translational invariance on a full
 * force-constants array fc[n_satom][n_satom][3][3].
 * Each of the `level` sweeps removes the mean (drift) along columns, then
 * along rows, then symmetrizes index permutation; finally the exact
 * translational sum rule is imposed on the self terms. The steps are
 * order-sensitive and applied in place. */
void phpy_perm_trans_symmetrize_fc(double *fc,
                                   const int n_satom,
                                   const int level)
{
  int i, j, k, l, iter;
  double sum;

  for (iter = 0; iter < level; iter++) {
    /* Subtract drift along column: for fixed (j, k, l), remove the mean
       over the first atom index i. */
    for (j = 0; j < n_satom; j++) {
      for (k = 0; k < 3; k++) {
        for (l = 0; l < 3; l++) {
          sum = 0;
          for (i = 0; i < n_satom; i++) {
            sum += fc[i * n_satom * 9 + j * 9 + k * 3 + l];
          }
          sum /= n_satom;
          for (i = 0; i < n_satom; i++) {
            fc[i * n_satom * 9 + j * 9 + k * 3 + l] -= sum;
          }
        }
      }
    }
    /* Subtract drift along row: same, over the second atom index j. */
    for (i = 0; i < n_satom; i++) {
      for (k = 0; k < 3; k++) {
        for (l = 0; l < 3; l++) {
          sum = 0;
          for (j = 0; j < n_satom; j++) {
            sum += fc[i * n_satom * 9 + j * 9 + k * 3 + l];
          }
          sum /= n_satom;
          for (j = 0; j < n_satom; j++) {
            fc[i * n_satom * 9 + j * 9 + k * 3 + l] -= sum;
          }
        }
      }
    }
    /* Re-impose fc[i,j,a,b] == fc[j,i,b,a]. */
    set_index_permutation_symmetry_fc(fc, n_satom);
  }
  /* Final exact translational invariance on the diagonal blocks. */
  set_translational_symmetry_fc(fc, n_satom);
}
/* Compact-fc counterpart of phpy_perm_trans_symmetrize_fc.
 * fc has shape [n_patom][n_satom][3][3]; p2s/s2pp/nsym_list/perms describe
 * the primitive<->supercell mapping used by the permutation routine.
 * Each of the `level` sweeps alternates a pure transpose with a row-drift
 * subtraction (twice), then applies the averaging permutation; finally the
 * translational sum rule is imposed. The ordering is deliberate and the
 * array is modified in place. */
void phpy_perm_trans_symmetrize_compact_fc(double *fc,
                                           const int p2s[],
                                           const int s2pp[],
                                           const int nsym_list[],
                                           const int perms[],
                                           const int n_satom,
                                           const int n_patom,
                                           const int level)
{
  int i, j, k, l, n, iter;
  double sum;

  for (iter=0; iter < level; iter++) {
    /* Two passes: drift is removed from both "orientations" of the
       compact array by transposing in between. */
    for (n = 0; n < 2; n++) {
      /* transpose only */
      phpy_set_index_permutation_symmetry_compact_fc(fc,
                                                     p2s,
                                                     s2pp,
                                                     nsym_list,
                                                     perms,
                                                     n_satom,
                                                     n_patom,
                                                     1);
      /* Subtract the mean over the supercell index j for each (i, k, l). */
      for (i = 0; i < n_patom; i++) {
        for (k = 0; k < 3; k++) {
          for (l = 0; l < 3; l++) {
            sum = 0;
            for (j = 0; j < n_satom; j++) {
              sum += fc[i * n_satom * 9 + j * 9 + k * 3 + l];
            }
            sum /= n_satom;
            for (j = 0; j < n_satom; j++) {
              fc[i * n_satom * 9 + j * 9 + k * 3 + l] -= sum;
            }
          }
        }
      }
    }
    /* Averaging pass (is_transpose = 0) restores permutation symmetry. */
    phpy_set_index_permutation_symmetry_compact_fc(fc,
                                                   p2s,
                                                   s2pp,
                                                   nsym_list,
                                                   perms,
                                                   n_satom,
                                                   n_patom,
                                                   0);
  }
  set_translational_symmetry_compact_fc(fc, p2s, n_satom, n_patom);
}
/* Apply index-permutation symmetry to a compact force-constants array
 * fc[n_patom][n_satom][3][3].
 * With is_transpose != 0 the symmetry-related pair of elements is swapped;
 * otherwise the pair is replaced by its average. Pairs are related by
 * (i_p, j) <-> (j_p, i_trans) with the Cartesian 3x3 block transposed,
 * where i_trans is obtained through the perms/nsym_list maps (see the
 * in-line comment). `done` marks already-handled pairs so each is touched
 * exactly once. */
void phpy_set_index_permutation_symmetry_compact_fc(double * fc,
                                                    const int p2s[],
                                                    const int s2pp[],
                                                    const int nsym_list[],
                                                    const int perms[],
                                                    const int n_satom,
                                                    const int n_patom,
                                                    const int is_transpose)
{
  int i, j, k, l, m, n, i_p, j_p, i_trans;
  double fc_elem;
  char *done;

  done = NULL;
  done = (char*)malloc(sizeof(char) * n_satom * n_patom);
  for (i = 0; i < n_satom * n_patom; i++) {
    done[i] = 0;
  }

  for (j = 0; j < n_satom; j++) {
    j_p = s2pp[j];
    for (i_p = 0; i_p < n_patom; i_p++) {
      i = p2s[i_p];
      if (i == j) { /* diagnoal part */
        /* Self block: symmetrize (or transpose) the 3x3 tensor in place,
           touching each off-diagonal Cartesian pair once (l > k). */
        for (k = 0; k < 3; k++) {
          for (l = 0; l < 3; l++) {
            if (l > k) {
              m = i_p * n_satom * 9 + i * 9 + k * 3 + l;
              n = i_p * n_satom * 9 + i * 9 + l * 3 + k;
              if (is_transpose) {
                fc_elem = fc[m];
                fc[m] = fc[n];
                fc[n] = fc_elem;
              } else {
                fc[m] = (fc[m] + fc[n]) / 2;
                fc[n] = fc[m];
              }
            }
          }
        }
      }

      if (!done[i_p * n_satom + j]) {
        /* (j, i) -- nsym_list[j] --> (j', i') */
        /* nsym_list[j] translates j to j' where j' is in */
        /* primitive cell. The same translation sends i to i' */
        /* where i' is not necessarily to be in primitive cell. */
        /* Thus, i' = perms[nsym_list[j] * n_satom + i] */
        i_trans = perms[nsym_list[j] * n_satom + i];
        /* Mark both members of the pair before modifying them. */
        done[i_p * n_satom + j] = 1;
        done[j_p * n_satom + i_trans] = 1;
        for (k = 0; k < 3; k++) {
          for (l = 0; l < 3; l++) {
            /* Note the transposed Cartesian indices (k,l) vs (l,k). */
            m = i_p * n_satom * 9 + j * 9 + k * 3 + l;
            n = j_p * n_satom * 9 + i_trans * 9 + l * 3 + k;
            if (is_transpose) {
              fc_elem = fc[m];
              fc[m] = fc[n];
              fc[n] = fc_elem;
            } else {
              fc[m] = (fc[n] + fc[m]) / 2;
              fc[n] = fc[m];
            }
          }
        }
      }
    }
  }

  free(done);
  done = NULL;
}
/* Impose fc[i,j,a,b] == fc[j,i,b,a] on a full force-constants array
 * fc[natom][natom][3][3] by averaging each symmetry-related pair in place. */
static void set_index_permutation_symmetry_fc(double * fc,
                                              const int natom)
{
  int i, j, k, l, p, q;

  for (i = 0; i < natom; i++) {
    /* Off-diagonal atom pairs (i, j), j > i: pair with transposed block. */
    for (j = i + 1; j < natom; j++) {
      for (k = 0; k < 3; k++) {
        for (l = 0; l < 3; l++) {
          p = 9 * (i * natom + j) + 3 * k + l;
          q = 9 * (j * natom + i) + 3 * l + k;
          fc[p] += fc[q];
          fc[p] /= 2;
          fc[q] = fc[p];
        }
      }
    }
    /* Diagonal atom block: symmetrize the 3x3 Cartesian tensor. */
    for (k = 0; k < 2; k++) {
      for (l = k + 1; l < 3; l++) {
        p = 9 * (i * natom + i) + 3 * k + l;
        q = 9 * (i * natom + i) + 3 * l + k;
        fc[p] += fc[q];
        fc[p] /= 2;
        fc[q] = fc[p];
      }
    }
  }
}
/* Impose the translational sum rule on fc[natom][natom][3][3]: the self
 * block of each atom is reset to minus the symmetrized sum of its
 * off-site blocks, so forces vanish under a rigid translation. */
static void set_translational_symmetry_fc(double * fc,
                                          const int natom)
{
  int i, j, k, l, idx;
  double acc[3][3];

  for (i = 0; i < natom; i++) {
    /* acc[k][l] = sum over j != i of fc[i][j][k][l]. */
    for (k = 0; k < 3; k++) {
      for (l = 0; l < 3; l++) {
        acc[k][l] = 0;
        idx = i * natom * 9 + k * 3 + l;
        for (j = 0; j < natom; j++) {
          if (j != i) {
            acc[k][l] += fc[idx];
          }
          idx += 9;
        }
      }
    }
    /* Symmetrized self term. */
    for (k = 0; k < 3; k++) {
      for (l = 0; l < 3; l++) {
        fc[i * natom * 9 + i * 9 + k * 3 + l] = -(acc[k][l] + acc[l][k]) / 2;
      }
    }
  }
}
/* Compact-fc variant of the translational sum rule: rows run over
 * primitive atoms only (fc[n_patom][n_satom][3][3]) and the self column
 * of primitive atom i_p is located through p2s[i_p]. */
static void set_translational_symmetry_compact_fc(double * fc,
                                                  const int p2s[],
                                                  const int n_satom,
                                                  const int n_patom)
{
  int j, k, l, idx, i_p;
  double acc[3][3];

  for (i_p = 0; i_p < n_patom; i_p++) {
    /* acc[k][l] = sum over supercell atoms j != p2s[i_p]. */
    for (k = 0; k < 3; k++) {
      for (l = 0; l < 3; l++) {
        acc[k][l] = 0;
        idx = i_p * n_satom * 9 + k * 3 + l;
        for (j = 0; j < n_satom; j++) {
          if (j != p2s[i_p]) {
            acc[k][l] += fc[idx];
          }
          idx += 9;
        }
      }
    }
    /* Symmetrized self term at the mapped supercell column. */
    for (k = 0; k < 3; k++) {
      for (l = 0; l < 3; l++) {
        fc[i_p * n_satom * 9 + p2s[i_p] * 9 + k * 3 + l] =
          -(acc[k][l] + acc[l][k]) / 2;
      }
    }
  }
}
/* Harmonic-mode free energy kB*T*ln(1 - exp(-f / (kB*T))).
 * `temperature` in K, `f` in eV (KB is in eV/K); result in eV.
 * Caller guarantees temperature > 0 and f above cutoff. */
static double get_free_energy(const double temperature, const double f)
{
  const double kt = KB * temperature;
  return kt * log(1 - exp(- f / kt));
}
/* Harmonic-mode entropy with x = f / (2 kB T):
 * S = f*coth(x)/(2T) - kB*ln(2*sinh(x)).
 * `temperature` in K, `f` in eV. Caller guarantees temperature > 0. */
static double get_entropy(const double temperature, const double f)
{
  double x;
  x = f / (2 * KB * temperature);
  return 1 / (2 * temperature) * f * cosh(x) / sinh(x) - KB * log(2 * sinh(x));
}
/* Harmonic-mode heat capacity kB * x^2 * e^x / (e^x - 1)^2 with
 * x = f / (kB T). `temperature` in K, `f` in eV.
 * Caller guarantees temperature > 0, so x is finite. */
static double get_heat_capacity(const double temperature, const double f)
{
  double x, ex, ratio;
  x = f / (KB * temperature);
  ex = exp(x);
  ratio = (x) / (ex - 1);
  return KB * ex * ratio * ratio;
}
/* Distribute fc2 blocks from symmetry-independent ("done") atoms to the
 * remaining atoms of atom_list via the rotations in r_carts.
 * For each atom that maps onto a done atom, every 3x3 block is accumulated
 * as P' = R^-1 P R using the rotation picked by map_syms and the atom
 * permutation of that rotation. fc2 rows/columns are indexed as
 * [len_atom_list][num_pos] blocks of 3x3. */
static void distribute_fc2(double (*fc2)[3][3], /* shape[n_pos][n_pos] */
                           const int * atom_list,
                           const int len_atom_list,
                           PHPYCONST double (*r_carts)[3][3], /* shape[n_rot] */
                           const int * permutations, /* shape[n_rot][n_pos] */
                           const int * map_atoms, /* shape [n_pos] */
                           const int * map_syms, /* shape [n_pos] */
                           const int num_rot,
                           const int num_pos)
{
  int i, j, k, l, m;
  int atom_todo, atom_done, atom_other;
  int sym_index;
  int *atom_list_reverse;
  double (*fc2_done)[3];
  double (*fc2_todo)[3];
  double (*r_cart)[3];
  const int * permutation;

  atom_list_reverse = NULL;
  atom_list_reverse = (int*)malloc(sizeof(int) * num_pos);
  /* atom_list_reverse[!atom_done] is undefined. */
  /* Map each done atom back to its row index in atom_list. */
  for (i = 0; i < len_atom_list; i++) {
    atom_done = map_atoms[atom_list[i]];
    if (atom_done == atom_list[i]) {
      atom_list_reverse[atom_done] = i;
    }
  }

  for (i = 0; i < len_atom_list; i++) {
    /* look up how this atom maps into the done list. */
    atom_todo = atom_list[i];
    atom_done = map_atoms[atom_todo];
    sym_index = map_syms[atom_todo];

    /* skip the atoms in the done list, */
    /* which are easily identified because they map to themselves. */
    if (atom_todo == atom_done) {
      continue;
    }

    /* look up information about the rotation */
    r_cart = r_carts[sym_index];
    permutation = &permutations[sym_index * num_pos]; /* shape[num_pos] */

    /* distribute terms from atom_done to atom_todo */
    for (atom_other = 0; atom_other < num_pos; atom_other++) {
      /* Source block: done atom's row, with the second index permuted
         by the rotation; destination: this atom's row. */
      fc2_done = fc2[atom_list_reverse[atom_done] * num_pos + permutation[atom_other]];
      fc2_todo = fc2[i * num_pos + atom_other];
      for (j = 0; j < 3; j++) {
        for (k = 0; k < 3; k++) {
          for (l = 0; l < 3; l++) {
            for (m = 0; m < 3; m++) {
              /* P' = R^-1 P R */
              fc2_todo[j][k] += r_cart[l][j] * r_cart[m][k] * fc2_done[l][m];
            }
          }
        }
      }
    }
  }

  free(atom_list_reverse);
  atom_list_reverse = NULL;
}
/* static double get_energy(double temperature, double f){ */
/* /\* temperature is defined by T (K) *\/ */
/* /\* 'f' must be given in eV. *\/ */
/* return f / (exp(f / (KB * temperature)) - 1); */
/* } */
/* Round to the nearest integer, with halves rounded away from zero. */
static int nint(const double a)
{
  return a < 0.0 ? (int)(a - 0.5) : (int)(a + 0.5);
}
|
displacement_lagrangemultiplier_frictional_contact_criteria.h | // KRATOS ___| | | |
// \___ \ __| __| | | __| __| | | __| _` | |
// | | | | | ( | | | | ( | |
// _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS
//
// License: BSD License
// license: StructuralMechanicsApplication/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_FRICTIONAL_CONTACT_CRITERIA_H)
#define KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_FRICTIONAL_CONTACT_CRITERIA_H
/* System includes */
/* External includes */
/* Project includes */
#include "utilities/table_stream_utility.h"
#include "utilities/color_utilities.h"
#include "solving_strategies/convergencecriterias/convergence_criteria.h"
#include "custom_utilities/active_set_utilities.h"
namespace Kratos
{
///@addtogroup ContactStructuralMechanicsApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@name Kratos Classes
///@{
/**
* @class DisplacementLagrangeMultiplierFrictionalContactCriteria
* @ingroup ContactStructuralMechanicsApplication
* @brief Convergence criteria for contact problems
* @details This class implements a convergence control based on nodal displacement and
* lagrange multiplier values. The error is evaluated separately for each of them, and
* relative and absolute tolerances for both must be specified.
* @author Vicente Mataix Ferrandiz
*/
template< class TSparseSpace,
class TDenseSpace >
class DisplacementLagrangeMultiplierFrictionalContactCriteria
: public ConvergenceCriteria< TSparseSpace, TDenseSpace >
{
public:
///@name Type Definitions
///@{
/// Pointer definition of DisplacementLagrangeMultiplierFrictionalContactCriteria
KRATOS_CLASS_POINTER_DEFINITION( DisplacementLagrangeMultiplierFrictionalContactCriteria );
/// Local Flags
KRATOS_DEFINE_LOCAL_FLAG( ENSURE_CONTACT );
KRATOS_DEFINE_LOCAL_FLAG( PRINTING_OUTPUT );
KRATOS_DEFINE_LOCAL_FLAG( TABLE_IS_INITIALIZED );
KRATOS_DEFINE_LOCAL_FLAG( PURE_SLIP );
/// The base class definition (and it subclasses)
typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
/// The sparse space used
typedef TSparseSpace SparseSpaceType;
/// The r_table stream definition TODO: Replace by logger
typedef TableStreamUtility::Pointer TablePrinterPointerType;
/// The index type definition
typedef std::size_t IndexType;
/// The key type definition
typedef std::size_t KeyType;
///@}
///@name Life Cycle
///@{
/// Constructor.
/**
 * @param DispRatioTolerance Relative tolerance for displacement error
 * @param DispAbsTolerance Absolute tolerance for displacement error
 * @param LMNormalRatioTolerance Relative tolerance for the normal LM error
 * @param LMNormalAbsTolerance Absolute tolerance for the normal LM error
 * @param LMTangentRatioTolerance Relative tolerance for the tangent LM error
 * @param LMTangentAbsTolerance Absolute tolerance for the tangent LM error
 * @param NormalTangentRatio Ratio between the normal and tangent that will accepted as converged
 * @param EnsureContact To check if the contact is lost
 * @param PureSlip If the problem is treated as pure slip
 * @param PrintingOutput If the output is going to be printed in a txt file
 */
explicit DisplacementLagrangeMultiplierFrictionalContactCriteria(
    const TDataType DispRatioTolerance,
    const TDataType DispAbsTolerance,
    const TDataType LMNormalRatioTolerance,
    const TDataType LMNormalAbsTolerance,
    const TDataType LMTangentRatioTolerance,
    const TDataType LMTangentAbsTolerance,
    const TDataType NormalTangentRatio,
    const bool EnsureContact = false,
    const bool PureSlip = false,
    const bool PrintingOutput = false
    )
    : BaseType()
{
    // Convergence tolerances: displacement block
    mDispRatioTolerance = DispRatioTolerance;
    mDispAbsTolerance = DispAbsTolerance;

    // Convergence tolerances: normal contact Lagrange multipliers
    mLMNormalRatioTolerance = LMNormalRatioTolerance;
    mLMNormalAbsTolerance = LMNormalAbsTolerance;

    // Convergence tolerances: tangent (frictional) Lagrange multipliers
    mLMTangentRatioTolerance = LMTangentRatioTolerance;
    mLMTangentAbsTolerance = LMTangentAbsTolerance;

    // Accepted ratio between normal and tangent convergence
    mNormalTangentRatio = NormalTangentRatio;

    // Behaviour flags (the output table is initialized lazily elsewhere)
    mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::ENSURE_CONTACT, EnsureContact);
    mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::PRINTING_OUTPUT, PrintingOutput);
    mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::PURE_SLIP, PureSlip);
    mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::TABLE_IS_INITIALIZED, false);
}
/**
 * @brief Default constructor (parameters)
 * @param ThisParameters The configuration parameters
 */
explicit DisplacementLagrangeMultiplierFrictionalContactCriteria( Parameters ThisParameters = Parameters(R"({})"))
    : BaseType()
{
    // The default parameters (missing entries in ThisParameters fall back
    // to these values after validation below)
    Parameters default_parameters = Parameters(R"(
    {
        "ensure_contact"                                     : false,
        "pure_slip"                                          : false,
        "print_convergence_criterion"                        : false,
        "displacement_relative_tolerance"                    : 1.0e-4,
        "displacement_absolute_tolerance"                    : 1.0e-9,
        "contact_displacement_relative_tolerance"            : 1.0e-4,
        "contact_displacement_absolute_tolerance"            : 1.0e-9,
        "frictional_contact_displacement_relative_tolerance" : 1.0e-4,
        "frictional_contact_displacement_absolute_tolerance" : 1.0e-9,
        "ratio_normal_tangent_threshold"                     : 1.0e-4
    })" );
    ThisParameters.ValidateAndAssignDefaults(default_parameters);

    // The displacement solution
    mDispRatioTolerance = ThisParameters["displacement_relative_tolerance"].GetDouble();
    mDispAbsTolerance = ThisParameters["displacement_absolute_tolerance"].GetDouble();

    // The normal contact solution
    mLMNormalRatioTolerance = ThisParameters["contact_displacement_relative_tolerance"].GetDouble();
    mLMNormalAbsTolerance = ThisParameters["contact_displacement_absolute_tolerance"].GetDouble();

    // The tangent contact solution
    mLMTangentRatioTolerance = ThisParameters["frictional_contact_displacement_relative_tolerance"].GetDouble();
    mLMTangentAbsTolerance = ThisParameters["frictional_contact_displacement_absolute_tolerance"].GetDouble();

    // We get the ratio between the normal and tangent that will accepted as converged
    mNormalTangentRatio = ThisParameters["ratio_normal_tangent_threshold"].GetDouble();

    // Set local flags
    mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::ENSURE_CONTACT, ThisParameters["ensure_contact"].GetBool());
    mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::PRINTING_OUTPUT, ThisParameters["print_convergence_criterion"].GetBool());
    mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::TABLE_IS_INITIALIZED, false);
    mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::PURE_SLIP, ThisParameters["pure_slip"].GetBool());
}
//* Copy constructor: copies all tolerances and option flags from rOther.
DisplacementLagrangeMultiplierFrictionalContactCriteria( DisplacementLagrangeMultiplierFrictionalContactCriteria const& rOther )
    :BaseType(rOther)
    ,mOptions(rOther.mOptions)
    ,mDispRatioTolerance(rOther.mDispRatioTolerance)
    ,mDispAbsTolerance(rOther.mDispAbsTolerance)
    ,mLMNormalRatioTolerance(rOther.mLMNormalRatioTolerance)
    ,mLMNormalAbsTolerance(rOther.mLMNormalAbsTolerance)
    ,mLMTangentRatioTolerance(rOther.mLMTangentRatioTolerance)
    ,mLMTangentAbsTolerance(rOther.mLMTangentAbsTolerance)
    ,mNormalTangentRatio(rOther.mNormalTangentRatio)
{
}

/// Destructor.
~DisplacementLagrangeMultiplierFrictionalContactCriteria() override = default;
///@}
///@name Operators
///@{
/**
* @brief Compute relative and absolute error.
* @param rModelPart Reference to the ModelPart containing the contact problem.
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
* @param rA System matrix (unused)
* @param rDx Vector of results (variations on nodal variables)
* @param rb RHS vector (residual)
* @return true if convergence is achieved, false otherwise
*/
bool PostCriteria(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
const TSystemMatrixType& rA,
const TSystemVectorType& rDx,
const TSystemVectorType& rb
) override
{
if (SparseSpaceType::Size(rDx) != 0) { //if we are solving for something
// Getting process info
ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
// Compute the active set
if (!r_process_info[ACTIVE_SET_COMPUTED]) {
const array_1d<std::size_t, 2> is_converged = ActiveSetUtilities::ComputeALMFrictionalActiveSet(rModelPart, mOptions.Is(DisplacementLagrangeMultiplierFrictionalContactCriteria::PURE_SLIP), this->GetEchoLevel());
// We save to the process info if the active set has converged
r_process_info[ACTIVE_SET_CONVERGED] = is_converged[0] == 0 ? true : false;
r_process_info[SLIP_SET_CONVERGED] = is_converged[1] == 0 ? true : false;
r_process_info[ACTIVE_SET_COMPUTED] = true;
}
// Initialize
TDataType disp_solution_norm = 0.0, normal_lm_solution_norm = 0.0, tangent_lm_stick_solution_norm = 0.0, tangent_lm_slip_solution_norm = 0.0, disp_increase_norm = 0.0, normal_lm_increase_norm = 0.0, tangent_lm_stick_increase_norm = 0.0, tangent_lm_slip_increase_norm = 0.0;
IndexType disp_dof_num(0), lm_dof_num(0), lm_stick_dof_num(0), lm_slip_dof_num(0);
// First iterator
const auto it_dof_begin = rDofSet.begin();
// The nodes array
auto& r_nodes_array = rModelPart.Nodes();
// Auxiliar values
std::size_t dof_id = 0;
TDataType dof_value = 0.0, dof_incr = 0.0;
// Loop over Dofs
#pragma omp parallel for reduction(+:disp_solution_norm, normal_lm_solution_norm, tangent_lm_slip_solution_norm, tangent_lm_stick_solution_norm, disp_increase_norm, normal_lm_increase_norm, tangent_lm_slip_increase_norm, tangent_lm_stick_increase_norm, disp_dof_num, lm_dof_num, lm_stick_dof_num, lm_slip_dof_num, dof_id, dof_value, dof_incr)
for (int i = 0; i < static_cast<int>(rDofSet.size()); i++) {
auto it_dof = it_dof_begin + i;
if (it_dof->IsFree()) {
dof_id = it_dof->EquationId();
dof_value = it_dof->GetSolutionStepValue(0);
dof_incr = rDx[dof_id];
const auto curr_var = it_dof->GetVariable();
if (curr_var == VECTOR_LAGRANGE_MULTIPLIER_X) {
// The normal of the node (TODO: how to solve this without accesing all the time to the database?)
const auto it_node = r_nodes_array.find(it_dof->Id());
const double normal_x = it_node->FastGetSolutionStepValue(NORMAL_X);
const TDataType normal_dof_value = dof_value * normal_x;
const TDataType normal_dof_incr = dof_incr * normal_x;
normal_lm_solution_norm += std::pow(normal_dof_value, 2);
normal_lm_increase_norm += std::pow(normal_dof_incr, 2);
if (it_node->Is(SLIP) || mOptions.Is(DisplacementLagrangeMultiplierFrictionalContactCriteria::PURE_SLIP)) {
tangent_lm_slip_solution_norm += std::pow(dof_value - normal_dof_value, 2);
tangent_lm_slip_increase_norm += std::pow(dof_incr - normal_dof_incr, 2);
++lm_slip_dof_num;
} else {
tangent_lm_stick_solution_norm += std::pow(dof_value - normal_dof_value, 2);
tangent_lm_stick_increase_norm += std::pow(dof_incr - normal_dof_incr, 2);
++lm_stick_dof_num;
}
lm_dof_num++;
} else if (curr_var == VECTOR_LAGRANGE_MULTIPLIER_Y) {
// The normal of the node (TODO: how to solve this without accesing all the time to the database?)
const auto it_node = r_nodes_array.find(it_dof->Id());
const double normal_y = it_node->FastGetSolutionStepValue(NORMAL_Y);
const TDataType normal_dof_value = dof_value * normal_y;
const TDataType normal_dof_incr = dof_incr * normal_y;
normal_lm_solution_norm += std::pow(normal_dof_value, 2);
normal_lm_increase_norm += std::pow(normal_dof_incr, 2);
if (it_node->Is(SLIP) || mOptions.Is(DisplacementLagrangeMultiplierFrictionalContactCriteria::PURE_SLIP)) {
tangent_lm_slip_solution_norm += std::pow(dof_value - normal_dof_value, 2);
tangent_lm_slip_increase_norm += std::pow(dof_incr - normal_dof_incr, 2);
++lm_slip_dof_num;
} else {
tangent_lm_stick_solution_norm += std::pow(dof_value - normal_dof_value, 2);
tangent_lm_stick_increase_norm += std::pow(dof_incr - normal_dof_incr, 2);
++lm_stick_dof_num;
}
lm_dof_num++;
} else if (curr_var == VECTOR_LAGRANGE_MULTIPLIER_Z) {
// The normal of the node (TODO: how to solve this without accesing all the time to the database?)
const auto it_node = r_nodes_array.find(it_dof->Id());
const double normal_z = it_node->FastGetSolutionStepValue(NORMAL_Z);
const TDataType normal_dof_value = dof_value * normal_z;
const TDataType normal_dof_incr = dof_incr * normal_z;
normal_lm_solution_norm += std::pow(normal_dof_value, 2);
normal_lm_increase_norm += std::pow(normal_dof_incr, 2);
if (it_node->Is(SLIP) || mOptions.Is(DisplacementLagrangeMultiplierFrictionalContactCriteria::PURE_SLIP)) {
tangent_lm_slip_solution_norm += std::pow(dof_value - normal_dof_value, 2);
tangent_lm_slip_increase_norm += std::pow(dof_incr - normal_dof_incr, 2);
++lm_slip_dof_num;
} else {
tangent_lm_stick_solution_norm += std::pow(dof_value - normal_dof_value, 2);
tangent_lm_stick_increase_norm += std::pow(dof_incr - normal_dof_incr, 2);
++lm_stick_dof_num;
}
lm_dof_num++;
} else {
disp_solution_norm += dof_value * dof_value;
disp_increase_norm += dof_incr * dof_incr;
disp_dof_num++;
}
}
}
if(disp_increase_norm == 0.0) disp_increase_norm = 1.0;
if(normal_lm_increase_norm == 0.0) normal_lm_increase_norm = 1.0;
if(tangent_lm_stick_increase_norm == 0.0) tangent_lm_stick_increase_norm = 1.0;
if(tangent_lm_slip_increase_norm == 0.0) tangent_lm_slip_increase_norm = 1.0;
if(disp_solution_norm == 0.0) disp_solution_norm = 1.0;
KRATOS_ERROR_IF(mOptions.Is(DisplacementLagrangeMultiplierFrictionalContactCriteria::ENSURE_CONTACT) && normal_lm_solution_norm == 0.0) << "WARNING::CONTACT LOST::ARE YOU SURE YOU ARE SUPPOSED TO HAVE CONTACT?" << std::endl;
const TDataType disp_ratio = std::sqrt(disp_increase_norm/disp_solution_norm);
const TDataType normal_lm_ratio = std::sqrt(normal_lm_increase_norm/normal_lm_solution_norm);
const TDataType tangent_lm_stick_ratio = std::sqrt(tangent_lm_stick_increase_norm/tangent_lm_stick_solution_norm);
const TDataType tangent_lm_slip_ratio = std::sqrt(tangent_lm_slip_increase_norm/tangent_lm_slip_solution_norm);
const TDataType disp_abs = std::sqrt(disp_increase_norm)/ static_cast<TDataType>(disp_dof_num);
const TDataType normal_lm_abs = std::sqrt(normal_lm_increase_norm)/ static_cast<TDataType>(lm_dof_num);
const TDataType tangent_lm_stick_abs = lm_stick_dof_num > 0 ? std::sqrt(tangent_lm_stick_increase_norm)/ static_cast<TDataType>(lm_stick_dof_num) : 0.0;
const TDataType tangent_lm_slip_abs = lm_slip_dof_num > 0 ? std::sqrt(tangent_lm_slip_increase_norm)/ static_cast<TDataType>(lm_slip_dof_num) : 0.0;
const TDataType normal_tangent_stick_ratio = tangent_lm_stick_abs/normal_lm_abs;
const TDataType normal_tangent_slip_ratio = tangent_lm_slip_abs/normal_lm_abs;
// We print the results // TODO: Replace for the new log
if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
if (r_process_info.Has(TABLE_UTILITY)) {
std::cout.precision(4);
TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
auto& Table = p_table->GetTable();
Table << disp_ratio << mDispRatioTolerance << disp_abs << mDispAbsTolerance << normal_lm_ratio << mLMNormalRatioTolerance << normal_lm_abs << mLMNormalAbsTolerance << tangent_lm_stick_ratio << mLMTangentRatioTolerance << tangent_lm_stick_abs << mLMTangentAbsTolerance << tangent_lm_slip_ratio << mLMTangentRatioTolerance << tangent_lm_slip_abs << mLMTangentAbsTolerance;
} else {
std::cout.precision(4);
if (mOptions.IsNot(DisplacementLagrangeMultiplierFrictionalContactCriteria::PRINTING_OUTPUT)) {
KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << BOLDFONT("DoF ONVERGENCE CHECK") << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl;
KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << BOLDFONT("\tDISPLACEMENT: RATIO = ") << disp_ratio << BOLDFONT(" EXP.RATIO = ") << mDispRatioTolerance << BOLDFONT(" ABS = ") << disp_abs << BOLDFONT(" EXP.ABS = ") << mDispAbsTolerance << std::endl;
KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << BOLDFONT(" NORMAL LAGRANGE MUL:\tRATIO = ") << normal_lm_ratio << BOLDFONT(" EXP.RATIO = ") << mLMNormalRatioTolerance << BOLDFONT(" ABS = ") << normal_lm_abs << BOLDFONT(" EXP.ABS = ") << mLMNormalAbsTolerance << std::endl;
KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << BOLDFONT(" STICK LAGRANGE MUL:\tRATIO = ") << tangent_lm_stick_ratio << BOLDFONT(" EXP.RATIO = ") << mLMTangentRatioTolerance << BOLDFONT(" ABS = ") << tangent_lm_stick_abs << BOLDFONT(" EXP.ABS = ") << mLMTangentAbsTolerance << std::endl;
KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << BOLDFONT(" SLIP LAGRANGE MUL:\tRATIO = ") << tangent_lm_slip_ratio << BOLDFONT(" EXP.RATIO = ") << mLMTangentRatioTolerance << BOLDFONT(" ABS = ") << tangent_lm_slip_abs << BOLDFONT(" EXP.ABS = ") << mLMTangentAbsTolerance << std::endl;
} else {
KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << "DoF ONVERGENCE CHECK" << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl;
KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << "\tDISPLACEMENT: RATIO = " << disp_ratio << " EXP.RATIO = " << mDispRatioTolerance << " ABS = " << disp_abs << " EXP.ABS = " << mDispAbsTolerance << std::endl;
KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << " NORMAL LAGRANGE MUL:\tRATIO = " << normal_lm_ratio << " EXP.RATIO = " << mLMNormalRatioTolerance << " ABS = " << normal_lm_abs << " EXP.ABS = " << mLMNormalAbsTolerance << std::endl;
KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << " STICK LAGRANGE MUL:\tRATIO = " << tangent_lm_stick_ratio << " EXP.RATIO = " << mLMTangentRatioTolerance << " ABS = " << tangent_lm_stick_abs << " EXP.ABS = " << mLMTangentAbsTolerance << std::endl;
KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << " SLIP LAGRANGE MUL:\tRATIO = " << tangent_lm_slip_ratio << " EXP.RATIO = " << mLMTangentRatioTolerance << " ABS = " << tangent_lm_slip_abs << " EXP.ABS = " << mLMTangentAbsTolerance << std::endl;
}
}
}
// We check if converged
const bool disp_converged = (disp_ratio <= mDispRatioTolerance || disp_abs <= mDispAbsTolerance);
const bool lm_converged = (mOptions.IsNot(DisplacementLagrangeMultiplierFrictionalContactCriteria::ENSURE_CONTACT) && normal_lm_solution_norm == 0.0) ? true : (normal_lm_ratio <= mLMNormalRatioTolerance || normal_lm_abs <= mLMNormalAbsTolerance) && (tangent_lm_stick_ratio <= mLMTangentRatioTolerance || tangent_lm_stick_abs <= mLMTangentAbsTolerance || normal_tangent_stick_ratio <= mNormalTangentRatio) && (tangent_lm_slip_ratio <= mLMTangentRatioTolerance || tangent_lm_slip_abs <= mLMTangentAbsTolerance || normal_tangent_slip_ratio <= mNormalTangentRatio);
if (disp_converged && lm_converged) {
if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
if (r_process_info.Has(TABLE_UTILITY)) {
TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
auto& r_table = p_table->GetTable();
if (mOptions.IsNot(DisplacementLagrangeMultiplierFrictionalContactCriteria::PRINTING_OUTPUT))
r_table << BOLDFONT(FGRN(" Achieved"));
else
r_table << "Achieved";
} else {
if (mOptions.IsNot(DisplacementLagrangeMultiplierFrictionalContactCriteria::PRINTING_OUTPUT))
KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << BOLDFONT("\tDoF") << " convergence is " << BOLDFONT(FGRN("achieved")) << std::endl;
else
KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << "\tDoF convergence is achieved" << std::endl;
}
}
return true;
} else {
if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
if (r_process_info.Has(TABLE_UTILITY)) {
TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
auto& r_table = p_table->GetTable();
if (mOptions.IsNot(DisplacementLagrangeMultiplierFrictionalContactCriteria::PRINTING_OUTPUT))
r_table << BOLDFONT(FRED(" Not achieved"));
else
r_table << "Not achieved";
} else {
if (mOptions.IsNot(DisplacementLagrangeMultiplierFrictionalContactCriteria::PRINTING_OUTPUT))
KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << BOLDFONT("\tDoF") << " convergence is " << BOLDFONT(FRED(" not achieved")) << std::endl;
else
KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << "\tDoF convergence is not achieved" << std::endl;
}
}
return false;
}
}
else // In this case all the displacements are imposed!
return true;
}
/**
* This function initialize the convergence criteria
* @param rModelPart Reference to the ModelPart containing the contact problem. (unused)
*/
/**
 * @brief This function initializes the convergence criteria
 * @details When a TABLE_UTILITY is present in the process info and the table has not
 * been initialized yet, the output table columns are created here (one
 * ratio/abs group per convergence quantity, plus a final convergence column).
 * @param rModelPart Reference to the ModelPart containing the contact problem. (unused)
 */
void Initialize( ModelPart& rModelPart ) override
{
    BaseType::mConvergenceCriteriaIsInitialized = true;

    ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
    if (r_process_info.Has(TABLE_UTILITY) && mOptions.IsNot(DisplacementLagrangeMultiplierFrictionalContactCriteria::TABLE_IS_INITIALIZED)) {
        TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
        auto& r_table = p_table->GetTable();

        // Every quantity is reported as: ratio, expected ratio, absolute value, expected absolute value
        const auto add_column_group = [&r_table](const char* pRatioLabel) {
            r_table.AddColumn(pRatioLabel, 10);
            r_table.AddColumn("EXP. RAT", 10);
            r_table.AddColumn("ABS", 10);
            r_table.AddColumn("EXP. ABS", 10);
        };

        add_column_group("DP RATIO");   // Displacement
        add_column_group("N.LM RATIO"); // Normal Lagrange multiplier
        if (mOptions.IsNot(DisplacementLagrangeMultiplierFrictionalContactCriteria::PURE_SLIP)) {
            add_column_group("STI. RATIO"); // Stick tangent LM (only meaningful when stick can occur)
        }
        add_column_group("SLIP RATIO"); // Slip tangent LM

        r_table.AddColumn("CONVERGENCE", 15);
        mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::TABLE_IS_INITIALIZED, true);
    }
}
/**
* @brief This function finalizes the non-linear iteration
* @param rModelPart Reference to the ModelPart containing the problem.
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
* @param rA System matrix (unused)
* @param rDx Vector of results (variations on nodal variables)
* @param rb RHS vector (residual + reactions)
*/
/**
 * @brief This function finalizes the non-linear iteration
 * @param rModelPart Reference to the ModelPart containing the problem.
 * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
 * @param rA System matrix (unused)
 * @param rDx Vector of results (variations on nodal variables)
 * @param rb RHS vector (residual + reactions)
 */
void FinalizeNonLinearIteration(
    ModelPart& rModelPart,
    DofsArrayType& rDofSet,
    const TSystemMatrixType& rA,
    const TSystemVectorType& rDx,
    const TSystemVectorType& rb
    ) override
{
    // Let the base criteria perform its own finalization first
    BaseType::FinalizeNonLinearIteration(rModelPart, rDofSet, rA, rDx, rb);

    // Invalidate the active set so it is recomputed on the next check
    rModelPart.GetProcessInfo().SetValue(ACTIVE_SET_COMPUTED, false);
}
///@}
///@name Operations
///@{
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Friends
///@{
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
Flags mOptions; /// Local flags
TDataType mDispRatioTolerance; /// The ratio threshold for the norm of the displacement
TDataType mDispAbsTolerance; /// The absolute value threshold for the norm of the displacement
TDataType mLMNormalRatioTolerance; /// The ratio threshold for the norm of the LM (normal)
TDataType mLMNormalAbsTolerance; /// The absolute value threshold for the norm of the LM (normal)
TDataType mLMTangentRatioTolerance; /// The ratio threshold for the norm of the LM (tangent)
TDataType mLMTangentAbsTolerance; /// The absolute value threshold for the norm of the LM (tangent)
TDataType mNormalTangentRatio; /// The ratio to accept a non converged tangent component in case
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@}
///@name Serialization
///@{
///@name Private Inquiry
///@{
///@}
///@name Unaccessible methods
///@{
///@}
}; // Kratos DisplacementLagrangeMultiplierFrictionalContactCriteria
///@name Local flags creation
///@{
/// Local Flags
// Static flag definitions. Each flag occupies one bit position (0..3); the
// NOT_* companion is the explicit "false" value of the same bit, created with
// Kratos::Flags::Create(position, false).
// Bit 0: when set, losing contact (zero normal LM norm) raises an error
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierFrictionalContactCriteria<TSparseSpace, TDenseSpace>::ENSURE_CONTACT(Kratos::Flags::Create(0));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierFrictionalContactCriteria<TSparseSpace, TDenseSpace>::NOT_ENSURE_CONTACT(Kratos::Flags::Create(0, false));
// Bit 1: when set, output is printed plain (no BOLDFONT/color markup)
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierFrictionalContactCriteria<TSparseSpace, TDenseSpace>::PRINTING_OUTPUT(Kratos::Flags::Create(1));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierFrictionalContactCriteria<TSparseSpace, TDenseSpace>::NOT_PRINTING_OUTPUT(Kratos::Flags::Create(1, false));
// Bit 2: guards against adding the table columns twice in Initialize()
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierFrictionalContactCriteria<TSparseSpace, TDenseSpace>::TABLE_IS_INITIALIZED(Kratos::Flags::Create(2));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierFrictionalContactCriteria<TSparseSpace, TDenseSpace>::NOT_TABLE_IS_INITIALIZED(Kratos::Flags::Create(2, false));
// Bit 3: when set, every contact node is treated as slipping (no stick branch)
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierFrictionalContactCriteria<TSparseSpace, TDenseSpace>::PURE_SLIP(Kratos::Flags::Create(3));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierFrictionalContactCriteria<TSparseSpace, TDenseSpace>::NOT_PURE_SLIP(Kratos::Flags::Create(3, false));
}
#endif /* KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_FRICTIONAL_CONTACT_CRITERIA_H */
|
convolution_2x2.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// 2x2 convolution, stride 1, NEON-optimized.
// Weight layout: 4 floats per (output channel p, input channel q) pair, at
// kernel + p * inch * 4 + q * 4 (see kernel0/kernel1 below).
// Input channels are processed two at a time (q, q+1) so two kernel vectors
// can be kept in registers; a scalar tail loop handles an odd last channel.
// NOTE(review): the per-row pointer advance of +1 at the end of each output
// row implies the input width is outw + 1 (valid 2x2 convolution) — confirm
// against the caller's padding logic.
static void conv2x2s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;
    const float* kernel = _kernel;
    const float* bias = _bias;
    // One output channel per task: each iteration only writes its own channel
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out = top_blob.channel(p);
        const float bias0 = bias ? bias[p] : 0.f;
        // Seed the whole output channel with the bias; input-channel passes accumulate on top
        out.fill(bias0);
        int q = 0;
        // Main pass: accumulate two input channels (q, q+1) per sweep over the output
        for (; q + 1 < inch; q += 2)
        {
            float* outptr = out;
            const float* img0 = bottom_blob.channel(q);
            const float* img1 = bottom_blob.channel(q + 1);
            // 4 weights (2x2) per input channel of this output channel
            const float* kernel0 = kernel + p * inch * 4 + q * 4;
            const float* kernel1 = kernel0 + 4;
            // r00/r01: top/bottom input rows of channel q; r10/r11: same for channel q+1
            const float* r00 = img0;
            const float* r01 = img0 + w;
            const float* r10 = img1;
            const float* r11 = img1 + w;
#if __ARM_NEON
            float32x4_t _k0 = vld1q_f32(kernel0);
            float32x4_t _k1 = vld1q_f32(kernel1);
#endif // __ARM_NEON
            for (int i = 0; i < outh; i++)
            {
#if __ARM_NEON
                // 4 output pixels per NEON iteration; scalar loop mops up the rest
                int nn = outw >> 2;
                int remain = outw & 3;
#else
                int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
                if (nn > 0)
                {
                    // AArch64: multiply-accumulate 4 adjacent outputs per iteration.
                    // ext builds the "shifted by one pixel" vectors for the right
                    // kernel column; orr vD, vS, vS copies the lookahead load back
                    // into the current-block register for the next iteration.
                    asm volatile(
                        "prfm pldl1keep, [%1, #128] \n"
                        "ld1 {v0.4s}, [%1], #16 \n"
                        "prfm pldl1keep, [%2, #128] \n"
                        "ld1 {v2.4s}, [%2], #16 \n"
                        "prfm pldl1keep, [%3, #128] \n"
                        "ld1 {v12.4s}, [%3], #16 \n"
                        "prfm pldl1keep, [%4, #128] \n"
                        "ld1 {v14.4s}, [%4], #16 \n"
                        "0: \n"
                        "prfm pldl1keep, [%5, #128] \n"
                        "ld1 {v9.4s}, [%5] \n"
                        "fmul v8.4s, v0.4s, %12.s[0] \n"
                        "fmla v9.4s, v2.4s, %12.s[2] \n"
                        "prfm pldl1keep, [%1, #128] \n"
                        "ld1 {v1.4s}, [%1], #16 \n"
                        "prfm pldl1keep, [%2, #128] \n"
                        "ld1 {v3.4s}, [%2], #16 \n"
                        "ext v10.16b, v0.16b, v1.16b, #4 \n"
                        "ext v11.16b, v2.16b, v3.16b, #4 \n"
                        "fmla v8.4s, v12.4s, %13.s[0] \n"
                        "fmla v9.4s, v14.4s, %13.s[2] \n"
                        "prfm pldl1keep, [%3, #128] \n"
                        "ld1 {v13.4s}, [%3], #16 \n"
                        "prfm pldl1keep, [%4, #128] \n"
                        "ld1 {v15.4s}, [%4], #16 \n"
                        "fmla v8.4s, v10.4s, %12.s[1] \n"
                        "fmla v9.4s, v11.4s, %12.s[3] \n"
                        "ext v10.16b, v12.16b, v13.16b, #4 \n"
                        "ext v11.16b, v14.16b, v15.16b, #4 \n"
                        "fmla v8.4s, v10.4s, %13.s[1] \n"
                        "fmla v9.4s, v11.4s, %13.s[3] \n"
                        "orr v0.16b, v1.16b, v1.16b \n"
                        "orr v2.16b, v3.16b, v3.16b \n"
                        "fadd v8.4s, v8.4s, v9.4s \n"
                        "orr v12.16b, v13.16b, v13.16b \n"
                        "orr v14.16b, v15.16b, v15.16b \n"
                        "subs %w0, %w0, #1 \n"
                        "st1 {v8.4s}, [%5], #16 \n"
                        "bne 0b \n"
                        "sub %1, %1, #16 \n"
                        "sub %2, %2, #16 \n"
                        "sub %3, %3, #16 \n"
                        "sub %4, %4, #16 \n"
                        : "=r"(nn), // %0
                        "=r"(r00), // %1
                        "=r"(r01), // %2
                        "=r"(r10), // %3
                        "=r"(r11), // %4
                        "=r"(outptr) // %5
                        : "0"(nn),
                        "1"(r00),
                        "2"(r01),
                        "3"(r10),
                        "4"(r11),
                        "5"(outptr),
                        "w"(_k0), // %12
                        "w"(_k1) // %13
                        : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15");
                }
#else
                if (nn > 0)
                {
                    // ARMv7 variant of the same scheme using q registers and
                    // vext for the one-pixel shift.
                    asm volatile(
                        "pld [%1, #128] \n"
                        "vld1.f32 {d0-d1}, [%1]! \n"
                        "pld [%2, #128] \n"
                        "vld1.f32 {d4-d5}, [%2]! \n"
                        "pld [%3, #128] \n"
                        "vld1.f32 {d24-d25}, [%3]! \n"
                        "pld [%4, #128] \n"
                        "vld1.f32 {d28-d29}, [%4]! \n"
                        "0: \n"
                        "pld [%5, #128] \n"
                        "vld1.f32 {d18-d19}, [%5] \n" // q9 = sum
                        "vmul.f32 q8, q0, %e12[0] \n"
                        "vmla.f32 q9, q2, %f12[0] \n"
                        "pld [%1, #128] \n"
                        "vld1.f32 {d2-d3}, [%1]! \n"
                        "pld [%2, #128] \n"
                        "vld1.f32 {d6-d7}, [%2]! \n"
                        "vext.f32 q10, q0, q1, #1 \n"
                        "vext.f32 q11, q2, q3, #1 \n"
                        "vmla.f32 q8, q12, %e13[0] \n"
                        "vmla.f32 q9, q14, %f13[0] \n"
                        "pld [%3, #128] \n"
                        "vld1.f32 {d26-d27}, [%3]! \n"
                        "pld [%4, #128] \n"
                        "vld1.f32 {d30-d31}, [%4]! \n"
                        "vmla.f32 q8, q10, %e12[1] \n"
                        "vmla.f32 q9, q11, %f12[1] \n"
                        "vext.f32 q10, q12, q13, #1 \n"
                        "vext.f32 q11, q14, q15, #1 \n"
                        "vmla.f32 q8, q10, %e13[1] \n"
                        "vmla.f32 q9, q11, %f13[1] \n"
                        "vorr q0, q1, q1 \n"
                        "vorr q2, q3, q3 \n"
                        "vadd.f32 q8, q8, q9 \n"
                        "vorr q12, q13, q13 \n"
                        "vorr q14, q15, q15 \n"
                        "subs %0, #1 \n"
                        "vst1.f32 {d16-d17}, [%5]! \n"
                        "bne 0b \n"
                        "sub %1, #16 \n"
                        "sub %2, #16 \n"
                        "sub %3, #16 \n"
                        "sub %4, #16 \n"
                        : "=r"(nn), // %0
                        "=r"(r00), // %1
                        "=r"(r01), // %2
                        "=r"(r10), // %3
                        "=r"(r11), // %4
                        "=r"(outptr) // %5
                        : "0"(nn),
                        "1"(r00),
                        "2"(r01),
                        "3"(r10),
                        "4"(r11),
                        "5"(outptr),
                        "w"(_k0), // %12
                        "w"(_k1) // %13
                        : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
                }
#endif // __aarch64__
#endif // __ARM_NEON
                // Scalar tail: one output pixel at a time (also the full-width
                // path when NEON is unavailable)
                for (; remain > 0; remain--)
                {
#if __ARM_NEON
                    // Pack the 2x2 window of each channel into one 4-lane vector
                    // and reduce the two dot products with a horizontal add
                    float32x2_t _r00 = vld1_f32(r00);
                    float32x2_t _r01 = vld1_f32(r01);
                    float32x4_t _r00r1 = vcombine_f32(_r00, _r01);
                    float32x4_t _s0s1 = vmulq_f32(_r00r1, _k0);
                    float32x2_t _r10 = vld1_f32(r10);
                    float32x2_t _r11 = vld1_f32(r11);
                    float32x4_t _r10r1 = vcombine_f32(_r10, _r11);
                    _s0s1 = vmlaq_f32(_s0s1, _r10r1, _k1);
                    float32x2_t _s = vadd_f32(vget_low_f32(_s0s1), vget_high_f32(_s0s1));
                    _s = vpadd_f32(_s, _s);
                    *outptr += vget_lane_f32(_s, 0);
#else
                    float sum = 0.f;
                    sum += r00[0] * kernel0[0];
                    sum += r00[1] * kernel0[1];
                    sum += r01[0] * kernel0[2];
                    sum += r01[1] * kernel0[3];
                    sum += r10[0] * kernel1[0];
                    sum += r10[1] * kernel1[1];
                    sum += r11[0] * kernel1[2];
                    sum += r11[1] * kernel1[3];
                    *outptr += sum;
#endif // __ARM_NEON
                    r00 += 1;
                    r01 += 1;
                    r10 += 1;
                    r11 += 1;
                    outptr++;
                }
                // Skip the last input column of the row (window is 2 wide, stride 1)
                r00 += 1;
                r01 += 1;
                r10 += 1;
                r11 += 1;
            }
        }
        // Tail pass: remaining single input channel when inch is odd
        for (; q < inch; q++)
        {
            float* outptr = out;
            const float* img0 = bottom_blob.channel(q);
            const float* kernel0 = kernel + p * inch * 4 + q * 4;
            const float* r0 = img0;
            const float* r1 = img0 + w;
#if __ARM_NEON
            // Broadcast each of the 4 weights across a vector for the asm kernels
            float32x4_t _k0 = vdupq_n_f32(kernel0[0]);
            float32x4_t _k1 = vdupq_n_f32(kernel0[1]);
            float32x4_t _k2 = vdupq_n_f32(kernel0[2]);
            float32x4_t _k3 = vdupq_n_f32(kernel0[3]);
#endif // __ARM_NEON
            for (int i = 0; i < outh; i++)
            {
#if __ARM_NEON
                int nn = outw >> 2;
                int remain = outw & 3;
#else
                int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
                if (nn > 0)
                {
                    // Single-channel AArch64 kernel: same shift-and-accumulate
                    // scheme with broadcast weights %8..%11
                    asm volatile(
                        "prfm pldl1keep, [%1, #128] \n"
                        "ld1 {v0.4s}, [%1], #16 \n"
                        "prfm pldl1keep, [%2, #128] \n"
                        "ld1 {v2.4s}, [%2], #16 \n"
                        "0: \n"
                        "prfm pldl1keep, [%3, #128] \n"
                        "ld1 {v9.4s}, [%3] \n"
                        "fmul v8.4s, v0.4s, %8.4s \n"
                        "fmla v9.4s, v2.4s, %10.4s \n"
                        "prfm pldl1keep, [%1, #128] \n"
                        "ld1 {v1.4s}, [%1], #16 \n"
                        "ext v10.16b, v0.16b, v1.16b, #4 \n"
                        "fmla v8.4s, v10.4s, %9.4s \n"
                        "prfm pldl1keep, [%2, #128] \n"
                        "ld1 {v3.4s}, [%2], #16 \n"
                        "ext v11.16b, v2.16b, v3.16b, #4 \n"
                        "fmla v9.4s, v11.4s, %11.4s \n"
                        "orr v0.16b, v1.16b, v1.16b \n"
                        "fadd v8.4s, v8.4s, v9.4s \n"
                        "orr v2.16b, v3.16b, v3.16b \n"
                        "subs %w0, %w0, #1 \n"
                        "st1 {v8.4s}, [%3], #16 \n"
                        "bne 0b \n"
                        "sub %1, %1, #16 \n"
                        "sub %2, %2, #16 \n"
                        : "=r"(nn), // %0
                        "=r"(r0), // %1
                        "=r"(r1), // %2
                        "=r"(outptr) // %3
                        : "0"(nn),
                        "1"(r0),
                        "2"(r1),
                        "3"(outptr),
                        "w"(_k0), // %8
                        "w"(_k1), // %9
                        "w"(_k2), // %10
                        "w"(_k3) // %11
                        : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11");
                }
#else
                if (nn > 0)
                {
                    // Single-channel ARMv7 kernel
                    asm volatile(
                        "pld [%1, #128] \n"
                        "vld1.f32 {d0-d1}, [%1]! \n"
                        "pld [%2, #128] \n"
                        "vld1.f32 {d4-d5}, [%2]! \n"
                        "0: \n"
                        "pld [%3, #128] \n"
                        "vld1.f32 {d18-d19}, [%3] \n" // q9 = sum
                        "vmul.f32 q8, q0, %q8 \n"
                        "vmla.f32 q9, q2, %q10 \n"
                        "pld [%1, #128] \n"
                        "vld1.f32 {d2-d3}, [%1]! \n"
                        "vext.f32 q10, q0, q1, #1 \n"
                        "vmla.f32 q8, q10, %q9 \n"
                        "pld [%2, #128] \n"
                        "vld1.f32 {d6-d7}, [%2]! \n"
                        "vext.f32 q11, q2, q3, #1 \n"
                        "vmla.f32 q9, q11, %q11 \n"
                        "vorr q0, q1, q1 \n"
                        "vadd.f32 q8, q8, q9 \n"
                        "vorr q2, q3, q3 \n"
                        "subs %0, #1 \n"
                        "vst1.f32 {d16-d17}, [%3]! \n"
                        "bne 0b \n"
                        "sub %1, #16 \n"
                        "sub %2, #16 \n"
                        : "=r"(nn), // %0
                        "=r"(r0), // %1
                        "=r"(r1), // %2
                        "=r"(outptr) // %3
                        : "0"(nn),
                        "1"(r0),
                        "2"(r1),
                        "3"(outptr),
                        "w"(_k0), // %8
                        "w"(_k1), // %9
                        "w"(_k2), // %10
                        "w"(_k3) // %11
                        : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11");
                }
#endif // __aarch64__
#endif // __ARM_NEON
#if __ARM_NEON
                // Scalar tail needs the 4 weights in one vector (not broadcast)
                float32x4_t _k0123 = vld1q_f32(kernel0);
#endif
                for (; remain > 0; remain--)
                {
#if __ARM_NEON
                    float32x2_t _r0 = vld1_f32(r0);
                    float32x2_t _r1 = vld1_f32(r1);
                    float32x4_t _r0r1 = vcombine_f32(_r0, _r1);
                    float32x4_t _s0s1 = vmulq_f32(_r0r1, _k0123);
                    float32x2_t _s = vadd_f32(vget_low_f32(_s0s1), vget_high_f32(_s0s1));
                    _s = vpadd_f32(_s, _s);
                    *outptr += vget_lane_f32(_s, 0);
#else
                    float sum = 0.f;
                    sum += r0[0] * kernel0[0];
                    sum += r0[1] * kernel0[1];
                    sum += r1[0] * kernel0[2];
                    sum += r1[1] * kernel0[3];
                    *outptr += sum;
#endif
                    r0 += 1;
                    r1 += 1;
                    outptr++;
                }
                // Skip the last input column of the row
                r0 += 1;
                r1 += 1;
            }
        }
    }
}
|
blake2sp.c | /*
BLAKE2 reference source code package - optimized C implementations
Written in 2012 by Samuel Neves <sneves@dei.uc.pt>
To the extent possible under law, the author(s) have dedicated all copyright
and related and neighboring rights to this software to the public domain
worldwide. This software is distributed without any warranty.
You should have received a copy of the CC0 Public Domain Dedication along with
this software. If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
*/
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
#include "blake2.h"
#include "blake2-impl.h"
#define PARALLELISM_DEGREE 8
/* Initialize one of the PARALLELISM_DEGREE leaf hashing instances.
 * offset identifies the leaf (stored in the parameter block so every leaf
 * produces a distinct intermediate hash). Returns blake2s_init_param's result. */
static inline int blake2sp_init_leaf( blake2s_state *S, uint8_t outlen, uint8_t keylen, uint64_t offset )
{
  blake2s_param P[1];
  memset( P->salt, 0, sizeof( P->salt ) );
  memset( P->personal, 0, sizeof( P->personal ) );
  P->digest_length = outlen;
  P->key_length    = keylen;
  P->fanout        = PARALLELISM_DEGREE;
  P->depth         = 2;               /* tree: leaves plus one root level */
  P->leaf_length   = 0;
  P->node_depth    = 0;               /* leaves sit at depth 0 */
  P->inner_length  = BLAKE2S_OUTBYTES;
  store48( P->node_offset, offset );  /* which leaf this is */
  return blake2s_init_param( S, P );
}
/* Initialize the root hashing instance that combines the leaf outputs.
 * Identical to a leaf parameter block except node_offset is 0 and
 * node_depth is 1. Returns blake2s_init_param's result. */
static inline int blake2sp_init_root( blake2s_state *S, uint8_t outlen, uint8_t keylen )
{
  blake2s_param P[1];
  memset( P->salt, 0, sizeof( P->salt ) );
  memset( P->personal, 0, sizeof( P->personal ) );
  P->digest_length = outlen;
  P->key_length    = keylen;
  P->fanout        = PARALLELISM_DEGREE;
  P->depth         = 2;
  P->leaf_length   = 0;
  P->node_depth    = 1;               /* root sits one level above the leaves */
  P->inner_length  = BLAKE2S_OUTBYTES;
  store48( P->node_offset, 0ULL );
  return blake2s_init_param( S, P );
}
/* Initialize an unkeyed blake2sp streaming context for an outlen-byte digest.
 * Returns 0 on success, -1 on invalid outlen or sub-state init failure. */
int blake2sp_init( blake2sp_state *S, const uint8_t outlen )
{
  size_t i;

  if( outlen == 0 || outlen > BLAKE2S_OUTBYTES ) return -1;

  memset( S->buf, 0, sizeof( S->buf ) );
  S->buflen = 0;

  if( blake2sp_init_root( S->R, outlen, 0 ) < 0 ) return -1;

  for( i = 0; i < PARALLELISM_DEGREE; ++i )
  {
    if( blake2sp_init_leaf( S->S[i], outlen, 0, i ) < 0 ) return -1;
  }

  /* The root and the final leaf are the last nodes of their tree levels */
  S->R->last_node = 1;
  S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
  return 0;
}
/* Initialize a keyed blake2sp streaming context.
 * The key is zero-padded to one full block and absorbed by every leaf.
 * Returns 0 on success, -1 on invalid outlen/key or sub-state init failure. */
int blake2sp_init_key( blake2sp_state *S, const uint8_t outlen, const void *key, const uint8_t keylen )
{
  uint8_t block[BLAKE2S_BLOCKBYTES];
  size_t i;

  if( outlen == 0 || outlen > BLAKE2S_OUTBYTES ) return -1;
  if( key == NULL || keylen == 0 || keylen > BLAKE2S_KEYBYTES ) return -1;

  memset( S->buf, 0, sizeof( S->buf ) );
  S->buflen = 0;

  if( blake2sp_init_root( S->R, outlen, keylen ) < 0 ) return -1;

  for( i = 0; i < PARALLELISM_DEGREE; ++i )
    if( blake2sp_init_leaf( S->S[i], outlen, keylen, i ) < 0 ) return -1;

  S->R->last_node = 1;
  S->S[PARALLELISM_DEGREE - 1]->last_node = 1;

  /* Feed the zero-padded key block to every leaf */
  memset( block, 0, BLAKE2S_BLOCKBYTES );
  memcpy( block, key, keylen );
  for( i = 0; i < PARALLELISM_DEGREE; ++i )
    blake2s_update( S->S[i], block, BLAKE2S_BLOCKBYTES );
  secure_zero_memory( block, BLAKE2S_BLOCKBYTES ); /* Burn the key from stack */

  return 0;
}
/* Absorb inlen bytes of input into the eight leaf states.
 * Input is consumed in "super-blocks" of PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES
 * bytes; within each super-block, leaf i owns block i. Partial super-blocks are
 * carried over in S->buf until completed by a later call or by blake2sp_final.
 * Always returns 0. */
int blake2sp_update( blake2sp_state *S, const uint8_t *in, uint64_t inlen )
{
  size_t left = S->buflen;
  size_t fill = sizeof( S->buf ) - left;
  /* If the carry buffer can be completed, flush one full super-block to the leaves */
  if( left && inlen >= fill )
  {
    memcpy( S->buf + left, in, fill );
    for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
      blake2s_update( S->S[i], S->buf + i * BLAKE2S_BLOCKBYTES, BLAKE2S_BLOCKBYTES );
    in += fill;
    inlen -= fill;
    left = 0;
  }
  /* With OpenMP each leaf is driven by a separate thread; otherwise loop serially.
   * NOTE(review): the OpenMP path assumes the runtime grants exactly
   * PARALLELISM_DEGREE threads, since id__ comes from omp_get_thread_num() —
   * with fewer threads some leaves would never be updated; confirm. */
#if defined(_OPENMP)
#pragma omp parallel shared(S), num_threads(PARALLELISM_DEGREE)
#else
  for( size_t id__ = 0; id__ < PARALLELISM_DEGREE; ++id__ )
#endif
  {
#if defined(_OPENMP)
    size_t id__ = omp_get_thread_num();
#endif
    uint64_t inlen__ = inlen;
    const uint8_t *in__ = ( const uint8_t * )in;
    in__ += id__ * BLAKE2S_BLOCKBYTES;
    /* Leaf id__ hashes block id__ of every complete super-block */
    while( inlen__ >= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES )
    {
      blake2s_update( S->S[id__], in__, BLAKE2S_BLOCKBYTES );
      in__ += PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
      inlen__ -= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
    }
  }
  /* Stash the incomplete trailing super-block for the next call */
  in += inlen - inlen % ( PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES );
  inlen %= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
  if( inlen > 0 )
    memcpy( S->buf + left, in, inlen );
  S->buflen = left + inlen;
  return 0;
}
/* Finish the hash: drain the carry buffer into the leaves, finalize each
 * leaf, feed the leaf digests (in leaf order) to the root, and finalize the
 * root into out. Always returns 0. */
int blake2sp_final( blake2sp_state *S, uint8_t *out, const uint8_t outlen )
{
  uint8_t hash[PARALLELISM_DEGREE][BLAKE2S_OUTBYTES];
  size_t i;

  for( i = 0; i < PARALLELISM_DEGREE; ++i )
  {
    const size_t offset = i * BLAKE2S_BLOCKBYTES;
    /* Leaf i owns block i of the buffered (partial) super-block */
    if( S->buflen > offset )
    {
      size_t left = S->buflen - offset;
      if( left > BLAKE2S_BLOCKBYTES ) left = BLAKE2S_BLOCKBYTES;
      blake2s_update( S->S[i], S->buf + offset, left );
    }
    blake2s_final( S->S[i], hash[i], BLAKE2S_OUTBYTES );
    /* Root absorbs the leaf digests in leaf order */
    blake2s_update( S->R, hash[i], BLAKE2S_OUTBYTES );
  }

  blake2s_final( S->R, out, outlen );
  return 0;
}
/* One-shot blake2sp: hash inlen bytes of in (optionally keyed) into an
 * outlen-byte digest at out. Returns 0 on success, -1 on bad pointers or
 * sub-state init failure. A NULL key is treated as "unkeyed". */
int blake2sp( uint8_t *out, const void *in, const void *key, uint8_t outlen, uint64_t inlen, uint8_t keylen )
{
  uint8_t hash[PARALLELISM_DEGREE][BLAKE2S_OUTBYTES];
  blake2s_state S[PARALLELISM_DEGREE][1];
  blake2s_state FS[1];
  /* Verify parameters */
  if ( NULL == in ) return -1;
  if ( NULL == out ) return -1;
  if ( NULL == key ) keylen = 0;
  for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
    if( blake2sp_init_leaf( S[i], outlen, keylen, i ) < 0 ) return -1;
  S[PARALLELISM_DEGREE - 1]->last_node = 1; // mark last node
  /* Keyed mode: every leaf first absorbs the zero-padded key block */
  if( keylen > 0 )
  {
    uint8_t block[BLAKE2S_BLOCKBYTES];
    memset( block, 0, BLAKE2S_BLOCKBYTES );
    memcpy( block, key, keylen );
    for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
      blake2s_update( S[i], block, BLAKE2S_BLOCKBYTES );
    secure_zero_memory( block, BLAKE2S_BLOCKBYTES ); /* Burn the key from stack */
  }
  /* Leaf i hashes block i of each super-block of
   * PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES bytes, plus its share of the tail.
   * NOTE(review): as in blake2sp_update, the OpenMP path assumes exactly
   * PARALLELISM_DEGREE threads are granted — confirm. */
#if defined(_OPENMP)
#pragma omp parallel shared(S,hash), num_threads(PARALLELISM_DEGREE)
#else
  for( size_t id__ = 0; id__ < PARALLELISM_DEGREE; ++id__ )
#endif
  {
#if defined(_OPENMP)
    size_t id__ = omp_get_thread_num();
#endif
    uint64_t inlen__ = inlen;
    const uint8_t *in__ = ( const uint8_t * )in;
    in__ += id__ * BLAKE2S_BLOCKBYTES;
    while( inlen__ >= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES )
    {
      blake2s_update( S[id__], in__, BLAKE2S_BLOCKBYTES );
      in__ += PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
      inlen__ -= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
    }
    /* This leaf's portion of the final partial super-block, if any */
    if( inlen__ > id__ * BLAKE2S_BLOCKBYTES )
    {
      const size_t left = inlen__ - id__ * BLAKE2S_BLOCKBYTES;
      const size_t len = left <= BLAKE2S_BLOCKBYTES ? left : BLAKE2S_BLOCKBYTES;
      blake2s_update( S[id__], in__, len );
    }
    blake2s_final( S[id__], hash[id__], BLAKE2S_OUTBYTES );
  }
  /* Combine the leaf digests in the root node */
  if( blake2sp_init_root( FS, outlen, keylen ) < 0 )
    return -1;
  FS->last_node = 1;
  for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
    blake2s_update( FS, hash[i], BLAKE2S_OUTBYTES );
  blake2s_final( FS, out, outlen );
  return 0;
}
#if defined(BLAKE2SP_SELFTEST)
#include <string.h>
#include "blake2-kat.h"
/* Self-test driver: hashes prefixes of a 0,1,2,... byte pattern with a fixed
 * key and compares each digest against the known-answer vectors in
 * blake2-kat.h. Prints "ok" and returns 0 on success, "error" and -1 on the
 * first mismatch. */
int main( int argc, char **argv )
{
  uint8_t key[BLAKE2S_KEYBYTES];
  uint8_t buf[KAT_LENGTH];
  /* Deterministic key and input: byte j has value j (mod 256) */
  for( size_t i = 0; i < BLAKE2S_KEYBYTES; ++i )
    key[i] = ( uint8_t )i;
  for( size_t i = 0; i < KAT_LENGTH; ++i )
    buf[i] = ( uint8_t )i;
  /* One keyed hash per input length 0..KAT_LENGTH-1 */
  for( size_t i = 0; i < KAT_LENGTH; ++i )
  {
    uint8_t hash[BLAKE2S_OUTBYTES];
    blake2sp( hash, buf, key, BLAKE2S_OUTBYTES, i, BLAKE2S_KEYBYTES );
    if( 0 != memcmp( hash, blake2sp_keyed_kat[i], BLAKE2S_OUTBYTES ) )
    {
      puts( "error" );
      return -1;
    }
  }
  puts( "ok" );
  return 0;
}
#endif
|
GB_unaryop__abs_int32_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_int32_int64
// op(A') function: GB_tran__abs_int32_int64
// C type: int32_t
// A type: int64_t
// cast: int32_t cij = (int32_t) aij
// unaryop: cij = GB_IABS (aij)
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IABS (x) ;
// casting
#define GB_CASTING(z, x) \
int32_t z = (int32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_INT32 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the unary operator elementwise over the dense value array:
// Cx [p] = GB_IABS ((int32_t) Ax [p]) for all p in 0..anz-1 (the cast happens
// before the op; see GB_CAST_OP above).  Work is split statically across
// OpenMP threads since every iteration has identical cost.
GrB_Info GB_unop__abs_int32_int64
(
    int32_t *restrict Cx,           // output values, already allocated, size anz
    const int64_t *restrict Ax,     // input values, size anz
    int64_t anz,                    // number of entries in Ax and Cx
    int nthreads                    // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    // this operator/type combination was disabled at compile time (GB_control.h)
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        // Cx [p] = GB_IABS ((int32_t) Ax [p])
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply the unary operator.
// All of the real work is in the generic template GB_unaryop_transpose.c,
// instantiated here with the GB_* macros defined above for this
// operator/type combination.
GrB_Info GB_tran__abs_int32_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,   // per-slice workspace; semantics defined in GB_unaryop_transpose.c
    GBI_single_iterator Iter,       // iterator over the slices of A
    const int64_t *restrict A_slice,
    int naslice                     // number of slices of A
)
{
    #if GB_DISABLE
    // this operator/type combination was disabled at compile time (GB_control.h)
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
convolution_5x5.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// 5x5 convolution, stride 1, scalar fallback.  Accumulates the result of
// every input channel into each output channel (top_blob is pre-filled with
// the bias).  Two output rows are produced per pass over the inner loop;
// an odd trailing row is handled separately.
static void conv5x5s1_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const float* kernel = _kernel;
    const float* bias = _bias;

    #pragma omp parallel for
    for (int p=0; p<outch; p++)
    {
        Mat out = top_blob.channel(p);

        const float bias0 = bias ? bias[p] : 0.f;
        out.fill(bias0);

        for (int q=0; q<inch; q++)
        {
            float* outptr = out;
            float* outptr2 = outptr + outw;

            const float* img0 = bottom_blob.channel(q);
            const float* kernel0 = kernel + p*inch*25 + q*25;

            // six consecutive input rows feed two output rows per iteration
            const float* r[6];
            for (int m = 0; m < 6; m++)
                r[m] = img0 + w*m;

            // the five rows of the 5x5 kernel
            const float* k[5];
            for (int m = 0; m < 5; m++)
                k[m] = kernel0 + m*5;

            int i = 0;

            for (; i+1 < outh; i+=2)
            {
                for (int remain = outw; remain > 0; remain--)
                {
                    float sum = 0;
                    float sum2 = 0;

                    // first output row: rows 0..4 against kernel rows 0..4
                    // (accumulation order matches the fully unrolled form)
                    for (int m = 0; m < 5; m++)
                        for (int t = 0; t < 5; t++)
                            sum += r[m][t] * k[m][t];

                    // second output row: rows 1..5 against kernel rows 0..4
                    for (int m = 0; m < 5; m++)
                        for (int t = 0; t < 5; t++)
                            sum2 += r[m+1][t] * k[m][t];

                    *outptr += sum;
                    *outptr2 += sum2;

                    for (int m = 0; m < 6; m++)
                        r[m]++;

                    outptr++;
                    outptr2++;
                }

                // skip the 4-pixel right border plus one full row
                // (two input rows were consumed this iteration)
                for (int m = 0; m < 6; m++)
                    r[m] += 4 + w;

                outptr += outw;
                outptr2 += outw;
            }

            // leftover single output row (when outh is odd)
            for (; i < outh; i++)
            {
                for (int remain = outw; remain > 0; remain--)
                {
                    float sum = 0;

                    for (int m = 0; m < 5; m++)
                        for (int t = 0; t < 5; t++)
                            sum += r[m][t] * k[m][t];

                    *outptr += sum;

                    for (int m = 0; m < 5; m++)
                        r[m]++;

                    outptr++;
                }

                // only the first five rows advance here; r[5] is unused
                for (int m = 0; m < 5; m++)
                    r[m] += 4;
            }
        }
    }
}
|
SpatialFullConvolutionMap.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/SpatialFullConvolutionMap.c"
#else
/* Forward pass of a full (transposed) convolution with a sparse connection
 * table.  Lua stack: arg 1 = module table (kW/kH/dW/dH, connTable, weight,
 * bias, output), arg 2 = 3D input tensor.  Output size per plane is
 * (in-1)*d + k.  Each connTable row k maps input plane connTable[k][0] to
 * output plane connTable[k][1] using weight slice k.  Returns 1 (output). */
static int nn_(SpatialFullConvolutionMap_updateOutput)(lua_State *L)
{
  THTensor *input = luaT_checkudata(L, 2, torch_Tensor);
  int kW = luaT_getfieldcheckint(L, 1, "kW");
  int kH = luaT_getfieldcheckint(L, 1, "kH");
  int dW = luaT_getfieldcheckint(L, 1, "dW");
  int dH = luaT_getfieldcheckint(L, 1, "dH");
  int nInputPlane = luaT_getfieldcheckint(L, 1, "nInputPlane");
  int nOutputPlane = luaT_getfieldcheckint(L, 1, "nOutputPlane");
  THTensor *connTable = luaT_getfieldcheckudata(L, 1, "connTable", torch_Tensor);
  THTensor *weight = luaT_getfieldcheckudata(L, 1, "weight", torch_Tensor);
  THTensor *bias = luaT_getfieldcheckudata(L, 1, "bias", torch_Tensor);
  THTensor *output = luaT_getfieldcheckudata(L, 1, "output", torch_Tensor);
  real *input_data;
  real *output_data;
  real *weight_data;
  real *bias_data;
  real *connTable_data;
  long input_h;
  long input_w;
  long output_h;
  long output_w;
  long weight_h;
  long weight_w;
  long p;
  luaL_argcheck(L, input->nDimension == 3, 2, "3D tensor expected");
  luaL_argcheck(L, input->size[0] >= nInputPlane, 2, "invalid number of input planes");
  /* full convolution grows each spatial dim: (in - 1) * stride + kernel */
  THTensor_(resize3d)(output, nOutputPlane,
                      (input->size[1] - 1) * dH + kH,
                      (input->size[2] - 1) * dW + kW);
  /* contiguous */
  input = THTensor_(newContiguous)(input);
  /* NOTE(review): writes below go into this (possibly copied) contiguous
   * tensor; presumably resize3d above guarantees output is already
   * contiguous so no copy is made here -- confirm. */
  output = THTensor_(newContiguous)(output);
  /* get raw pointers */
  input_data = THTensor_(data)(input);
  output_data = THTensor_(data)(output);
  weight_data = THTensor_(data)(weight);
  bias_data = THTensor_(data)(bias);
  connTable_data = THTensor_(data)(connTable);
  /* and dims */
  input_h = input->size[1];
  input_w = input->size[2];
  output_h = output->size[1];
  output_w = output->size[2];
  weight_h = weight->size[1];
  weight_w = weight->size[2];
  /* parallel over output planes: each p owns its output slice, so no race */
  #pragma omp parallel for private(p)
  for (p = 0; p < nOutputPlane; p++) {
    /* add bias */
    real *ptr_output = output_data + p*output_w*output_h;
    long j;
    int nweight;
    long k;
    for(j = 0; j < output_h*output_w; j++)
      ptr_output[j] = bias_data[p];
    /* convolve all maps */
    nweight = connTable->size[0];
    for (k = 0; k < nweight; k++) {
      /* get offsets for input/output (connTable entries are 1-based) */
      int o = (int)connTable_data[k*2+1]-1;
      int i = (int)connTable_data[k*2+0]-1;
      if (o == p)
      {
        /* accumulate full 2D convolution of input plane i into output plane o */
        THTensor_(fullConv2Dptr)(output_data + o*output_w*output_h,
                                 1.0,
                                 input_data + i*input_w*input_h, input_h, input_w,
                                 weight_data + k*weight_w*weight_h, weight_h, weight_w,
                                 dH, dW);
      }
    }
  }
  /* clean up (drops the refs taken by newContiguous) */
  THTensor_(free)(input);
  THTensor_(free)(output);
  return 1;
}
/* Backward pass w.r.t. the input.  Lua stack: arg 1 = module table,
 * arg 2 = input (used only for its sizes), arg 3 = gradOutput.
 * gradInput is resized to match input, zeroed, then each connTable entry
 * accumulates a valid cross-correlation of gradOutput into its input plane.
 * Returns 1 (gradInput). */
static int nn_(SpatialFullConvolutionMap_updateGradInput)(lua_State *L)
{
  THTensor *input = luaT_checkudata(L, 2, torch_Tensor);
  THTensor *gradOutput = luaT_checkudata(L, 3, torch_Tensor);
  int dW = luaT_getfieldcheckint(L, 1, "dW");
  int dH = luaT_getfieldcheckint(L, 1, "dH");
  int nInputPlane = luaT_getfieldcheckint(L, 1, "nInputPlane");
  THTensor *connTable = luaT_getfieldcheckudata(L, 1, "connTable", torch_Tensor);
  THTensor *weight = luaT_getfieldcheckudata(L, 1, "weight", torch_Tensor);
  THTensor *gradInput = luaT_getfieldcheckudata(L, 1, "gradInput", torch_Tensor);
  real *gradInput_data;
  real *gradOutput_data;
  real *weight_data;
  real *connTable_data;
  long input_h;
  long input_w;
  long output_h;
  long output_w;
  long weight_h;
  long weight_w;
  long p;
  /* contiguous (input itself is never dereferenced, only its sizes are read,
   * so it needs no newContiguous) */
  gradInput = THTensor_(newContiguous)(gradInput);
  gradOutput = THTensor_(newContiguous)(gradOutput);
  /* Resize/Zero */
  THTensor_(resizeAs)(gradInput, input);
  THTensor_(zero)(gradInput);
  /* get raw pointers */
  gradInput_data = THTensor_(data)(gradInput);
  gradOutput_data = THTensor_(data)(gradOutput);
  weight_data = THTensor_(data)(weight);
  connTable_data = THTensor_(data)(connTable);
  /* and dims */
  input_h = input->size[1];
  input_w = input->size[2];
  output_h = gradOutput->size[1];
  output_w = gradOutput->size[2];
  weight_h = weight->size[1];
  weight_w = weight->size[2];
  /* parallel over input planes: each p only writes its own gradInput slice */
  #pragma omp parallel for private(p)
  for(p = 0; p < nInputPlane; p++)
  {
    long k;
    /* backward all */
    int nkernel = connTable->size[0];
    for(k = 0; k < nkernel; k++)
    {
      /* connTable entries are 1-based (Lua convention) */
      int o = (int)connTable_data[k*2+1]-1;
      int i = (int)connTable_data[k*2+0]-1;
      if (i == p)
      {
        /* gradient to input */
        THTensor_(validXCorr2Dptr)(gradInput_data + i*input_w*input_h,
                                   1.0,
                                   gradOutput_data + o*output_w*output_h, output_h, output_w,
                                   weight_data + k*weight_w*weight_h, weight_h, weight_w,
                                   dH, dW);
      }
    }
  }
  /* clean up (drops the refs taken by newContiguous) */
  THTensor_(free)(gradInput);
  THTensor_(free)(gradOutput);
  return 1;
}
/* Accumulate gradients w.r.t. the parameters (gradWeight, gradBias),
 * scaled by the optional 4th Lua argument (default 1).  Both OpenMP loops
 * are race-free: the bias loop writes one gradBias entry per k, and each
 * connTable row k owns a distinct gradWeight slice.  Returns 0. */
static int nn_(SpatialFullConvolutionMap_accGradParameters)(lua_State *L)
{
  THTensor *input = luaT_checkudata(L, 2, torch_Tensor);
  THTensor *gradOutput = luaT_checkudata(L, 3, torch_Tensor);
  int dW = luaT_getfieldcheckint(L, 1, "dW");
  int dH = luaT_getfieldcheckint(L, 1, "dH");
  int nOutputPlane = luaT_getfieldcheckint(L, 1, "nOutputPlane");
  real scale = luaL_optnumber(L, 4, 1);
  THTensor *connTable = luaT_getfieldcheckudata(L, 1, "connTable", torch_Tensor);
  THTensor *weight = luaT_getfieldcheckudata(L, 1, "weight", torch_Tensor);
  THTensor *gradWeight = luaT_getfieldcheckudata(L, 1, "gradWeight", torch_Tensor);
  THTensor *gradBias = luaT_getfieldcheckudata(L, 1, "gradBias", torch_Tensor);
  real *input_data;
  real *gradOutput_data;
  real *gradWeight_data;
  real *gradBias_data;
  long input_h;
  long input_w;
  long output_h;
  long output_w;
  long weight_h;
  long weight_w;
  long k;
  int nkernel;
  /* contiguous */
  input = THTensor_(newContiguous)(input);
  gradOutput = THTensor_(newContiguous)(gradOutput);
  /* get raw pointers */
  input_data = THTensor_(data)(input);
  gradOutput_data = THTensor_(data)(gradOutput);
  gradWeight_data = THTensor_(data)(gradWeight);
  gradBias_data = THTensor_(data)(gradBias);
  /* and dims (weight is read only for its slice shape) */
  input_h = input->size[1];
  input_w = input->size[2];
  output_h = gradOutput->size[1];
  output_w = gradOutput->size[2];
  weight_h = weight->size[1];
  weight_w = weight->size[2];
  /* gradients wrt bias: gradBias[k] += scale * sum(gradOutput plane k) */
  #pragma omp parallel for private(k)
  for(k = 0; k < nOutputPlane; k++) {
    real *ptr_gradOutput = gradOutput_data + k*output_w*output_h;
    long l;
    for(l = 0; l < output_h*output_w; l++)
      gradBias_data[k] += scale*ptr_gradOutput[l];
  }
  /* gradients wrt weight */
  nkernel = connTable->size[0];
  #pragma omp parallel for private(k)
  for(k = 0; k < nkernel; k++)
  {
    /* connTable entries are 1-based (Lua convention) */
    int o = (int)THTensor_(get2d)(connTable,k,1)-1;
    int i = (int)THTensor_(get2d)(connTable,k,0)-1;
    /* gradient to kernel */
    THTensor_(validXCorr2DRevptr)(gradWeight_data + k*weight_w*weight_h,
                                  scale,
                                  gradOutput_data + o*output_w*output_h, output_h, output_w,
                                  input_data + i*input_w*input_h, input_h, input_w,
                                  dH, dW);
  }
  /* clean up (drops the refs taken by newContiguous) */
  THTensor_(free)(input);
  THTensor_(free)(gradOutput);
  return 0;
}
/* Lua method table: maps the Lua-visible names to the C implementations above. */
static const struct luaL_Reg nn_(SpatialFullConvolutionMapStuff__) [] = {
  {"SpatialFullConvolutionMap_updateOutput", nn_(SpatialFullConvolutionMap_updateOutput)},
  {"SpatialFullConvolutionMap_updateGradInput", nn_(SpatialFullConvolutionMap_updateGradInput)},
  {"SpatialFullConvolutionMap_accGradParameters", nn_(SpatialFullConvolutionMap_accGradParameters)},
  {NULL, NULL}
};
/* Register the method table under the "nn" field of the torch Tensor
 * metatable, then pop the metatable to leave the Lua stack balanced. */
static void nn_(SpatialFullConvolutionMap_init)(lua_State *L)
{
  luaT_pushmetatable(L, torch_Tensor);
  luaT_registeratname(L, nn_(SpatialFullConvolutionMapStuff__), "nn");
  lua_pop(L,1);
}
#endif
|
phylokernelmixrate.h | /*
* phylokernelmixrate.h
*
* Created on: Jan 7, 2015
* Author: minh
*/
#ifndef PHYLOKERNELMIXRATE_H_
#define PHYLOKERNELMIXRATE_H_
#include "model/modelmixture.h"
/************************************************************************************************
*
* Highly optimized vectorized versions of likelihood functions
*
*************************************************************************************************/
/**
 * Compute the partial likelihood vector at dad_branch (viewed from dad) for a
 * mixture-rate model, vectorized with VectorClass (SIMD width VCSIZE) over
 * nstates character states.  Recurses into the two child neighbors first,
 * then combines them per pattern and per rate category through the model's
 * eigen-decomposition, with per-pattern numerical rescaling to avoid
 * underflow.  The three main branches specialize the cases TIP-TIP,
 * TIP-INTERNAL, and INTERNAL-INTERNAL.
 */
template <class VectorClass, const int VCSIZE, const int nstates>
void PhyloTree::computeMixratePartialLikelihoodEigenSIMD(PhyloNeighbor *dad_branch, PhyloNode *dad) {
    if (dad_branch->node->degree() > 3) {
        // TODO: SIMD version for multifurcating node
        computePartialLikelihoodEigen(dad_branch, dad);
        return;
    }
    // don't recompute the likelihood
    ASSERT(dad);
    if (dad_branch->partial_lh_computed & 1)
        return;
    dad_branch->partial_lh_computed |= 1;
    // patterns = observed alignment patterns + unobserved ones (for +ASC correction)
    size_t nptn = aln->size() + model_factory->unobserved_ptns.size();
    PhyloNode *node = (PhyloNode*)(dad_branch->node);
    if (node->isLeaf()) {
        // leaf: partial likelihoods come from the precomputed tip table
        dad_branch->lh_scale_factor = 0.0;
        //memset(dad_branch->scale_num, 0, nptn * sizeof(UBYTE));
        if (!tip_partial_lh_computed)
            computeTipPartialLikelihood();
        return;
    }
    size_t ptn, c;
    size_t orig_ntn = aln->size();
    size_t ncat = site_rate->getNRate();
    ASSERT(ncat == model->getNMixtures());
    ASSERT(nstates == aln->num_states && nstates >= VCSIZE && VCSIZE == VectorClass().size());
    ASSERT(model->isReversible()); // only works with reversible model!
    const size_t nstatesqr=nstates*nstates;
    size_t i, x, j;
    // block = size of one pattern's partial-lh vector (all categories)
    size_t block = nstates * ncat;
    // internal node
    ASSERT(node->degree() == 3); // it works only for strictly bifurcating tree
    PhyloNeighbor *left = NULL, *right = NULL; // left & right are two neighbors leading to 2 subtrees
    FOR_NEIGHBOR_IT(node, dad, it) {
        if (!left) left = (PhyloNeighbor*)(*it); else right = (PhyloNeighbor*)(*it);
    }
    // canonicalize: if exactly one child is a leaf, make it "left"
    if (!left->node->isLeaf() && right->node->isLeaf()) {
        // swap left and right
        PhyloNeighbor *tmp = left;
        left = right;
        right = tmp;
    }
    if ((left->partial_lh_computed & 1) == 0)
        computeMixratePartialLikelihoodEigenSIMD<VectorClass, VCSIZE, nstates>(left, node);
    if ((right->partial_lh_computed & 1) == 0)
        computeMixratePartialLikelihoodEigenSIMD<VectorClass, VCSIZE, nstates>(right, node);
    if (params->lh_mem_save == LM_PER_NODE && !dad_branch->partial_lh) {
        // re-orient partial_lh: steal the buffer from a neighbor pointing back at this node
        bool done = false;
        FOR_NEIGHBOR_IT(node, dad, it2) {
            PhyloNeighbor *backnei = ((PhyloNeighbor*)(*it2)->node->findNeighbor(node));
            if (backnei->partial_lh) {
                dad_branch->partial_lh = backnei->partial_lh;
                dad_branch->scale_num = backnei->scale_num;
                backnei->partial_lh = NULL;
                backnei->scale_num = NULL;
                backnei->partial_lh_computed &= ~1; // clear bit
                done = true;
                break;
            }
        }
        ASSERT(done && "partial_lh is not re-oriented");
    }
    // load the inverse eigenvectors into aligned SIMD lanes
    double *evec = model->getEigenvectors();
    double *inv_evec = model->getInverseEigenvectors();
    VectorClass *vc_inv_evec = aligned_alloc<VectorClass>(ncat*nstates*nstates/VCSIZE);
    ASSERT(inv_evec && evec);
    for (c = 0; c < ncat; c++)
        for (i = 0; i < nstates; i++) {
            for (x = 0; x < nstates/VCSIZE; x++)
                // inv_evec is not aligned!
                vc_inv_evec[(c*nstates+i)*nstates/VCSIZE+x].load_a(&inv_evec[c*nstatesqr+i*nstates+x*VCSIZE]);
        }
    double *eval = model->getEigenvalues();
    dad_branch->lh_scale_factor = left->lh_scale_factor + right->lh_scale_factor;
    // eleft/eright hold evec * exp(eval * rate * branch_length) per category
    VectorClass *eleft = (VectorClass*)aligned_alloc<double>(block*nstates);
    VectorClass *eright = (VectorClass*)aligned_alloc<double>(block*nstates);
    // precompute information buffer
    for (c = 0; c < ncat; c++) {
        VectorClass vc_evec;
        VectorClass expleft[nstates/VCSIZE];
        VectorClass expright[nstates/VCSIZE];
        double len_left = site_rate->getRate(c) * left->length;
        double len_right = site_rate->getRate(c) * right->length;
        for (i = 0; i < nstates/VCSIZE; i++) {
            // eval is not aligned!
            expleft[i] = exp(VectorClass().load_a(&eval[c*nstates+i*VCSIZE]) * VectorClass(len_left));
            expright[i] = exp(VectorClass().load_a(&eval[c*nstates+i*VCSIZE]) * VectorClass(len_right));
        }
        for (x = 0; x < nstates; x++)
            for (i = 0; i < nstates/VCSIZE; i++) {
                // evec is not aligned!
                vc_evec.load_a(&evec[c*nstatesqr+x*nstates+i*VCSIZE]);
                eleft[c*nstatesqr/VCSIZE+x*nstates/VCSIZE+i] = (vc_evec * expleft[i]);
                eright[c*nstatesqr/VCSIZE+x*nstates/VCSIZE+i] = (vc_evec * expright[i]);
            }
    }
    if (left->node->isLeaf() && right->node->isLeaf()) {
        // special treatment for TIP-TIP (cherry) case
        // pre compute information for both tips, one row per possible tip state
        double *partial_lh_left = aligned_alloc<double>((aln->STATE_UNKNOWN+1)*block);
        double *partial_lh_right = aligned_alloc<double>((aln->STATE_UNKNOWN+1)*block);
        vector<int>::iterator it;
        for (it = aln->seq_states[left->node->id].begin(); it != aln->seq_states[left->node->id].end(); it++) {
            int state = (*it);
            VectorClass vc_partial_lh_tmp[nstates/VCSIZE];
            VectorClass vleft[VCSIZE];
            for (c = 0; c < ncat; c++) {
                size_t addrtip = state*block+c*nstates;
                for (i = 0; i < nstates/VCSIZE; i++)
                    vc_partial_lh_tmp[i].load_a(&tip_partial_lh[addrtip+i*VCSIZE]);
                for (x = 0; x < nstates; x+=VCSIZE) {
                    size_t addr = (c*nstates+x)*nstates/VCSIZE;
                    for (j = 0; j < VCSIZE; j++)
                        vleft[j] = eleft[addr+j*nstates/VCSIZE] * vc_partial_lh_tmp[0];
                    for (i = 1; i < nstates/VCSIZE; i++) {
                        for (j = 0; j < VCSIZE; j++)
                            vleft[j] = mul_add(eleft[addr+j*nstates/VCSIZE+i], vc_partial_lh_tmp[i], vleft[j]);
                    }
                    horizontal_add(vleft).store_a(&partial_lh_left[addrtip+x]);
                }
            }
        }
        for (it = aln->seq_states[right->node->id].begin(); it != aln->seq_states[right->node->id].end(); it++) {
            int state = (*it);
            VectorClass vright[VCSIZE];
            VectorClass vc_partial_lh_tmp[nstates/VCSIZE];
            for (c = 0; c < ncat; c++) {
                size_t addrtip = state*block+c*nstates;
                for (i = 0; i < nstates/VCSIZE; i++)
                    vc_partial_lh_tmp[i].load_a(&tip_partial_lh[addrtip+i*VCSIZE]);
                for (x = 0; x < nstates; x+=VCSIZE) {
                    size_t addr = (c*nstates+x)*nstates/VCSIZE;
                    for (j = 0; j < VCSIZE; j++)
                        vright[j] = eright[addr+j*nstates/VCSIZE] * vc_partial_lh_tmp[0];
                    for (i = 1; i < nstates/VCSIZE; i++) {
                        for (j = 0; j < VCSIZE; j++)
                            vright[j] = mul_add(eright[addr+j*nstates/VCSIZE+i], vc_partial_lh_tmp[i], vright[j]);
                    }
                    horizontal_add(vright).store_a(&partial_lh_right[addrtip+x]);
                }
            }
        }
        // unknown/gap state contributes likelihood 1 for every state
        size_t addr_unknown = aln->STATE_UNKNOWN * block;
        for (x = 0; x < block; x++) {
            partial_lh_left[addr_unknown+x] = 1.0;
            partial_lh_right[addr_unknown+x] = 1.0;
        }
        // assign pointers for left and right partial_lh
        double **lh_left_ptr = aligned_alloc<double*>(nptn);
        double **lh_right_ptr = aligned_alloc<double*>(nptn);
        for (ptn = 0; ptn < orig_ntn; ptn++) {
            lh_left_ptr[ptn] = &partial_lh_left[block * (aln->at(ptn))[left->node->id]];
            lh_right_ptr[ptn] = &partial_lh_right[block * (aln->at(ptn))[right->node->id]];
        }
        for (ptn = orig_ntn; ptn < nptn; ptn++) {
            lh_left_ptr[ptn] = &partial_lh_left[block * model_factory->unobserved_ptns[ptn-orig_ntn]];
            lh_right_ptr[ptn] = &partial_lh_right[block * model_factory->unobserved_ptns[ptn-orig_ntn]];
        }
        // scale number must be ZERO
        memset(dad_branch->scale_num, 0, nptn * sizeof(UBYTE));
        VectorClass vc_partial_lh_tmp[nstates/VCSIZE];
        VectorClass res[VCSIZE];
#ifdef _OPENMP
#pragma omp parallel for private(ptn, c, x, i, j, vc_partial_lh_tmp, res)
#endif
        for (ptn = 0; ptn < nptn; ptn++) {
            double *partial_lh = dad_branch->partial_lh + ptn*block;
            double *lh_left = lh_left_ptr[ptn];
            double *lh_right = lh_right_ptr[ptn];
            for (c = 0; c < ncat; c++) {
                // compute real partial likelihood vector
                VectorClass *this_inv_evec = &vc_inv_evec[c*nstatesqr/VCSIZE];
                for (x = 0; x < nstates/VCSIZE; x++) {
                    vc_partial_lh_tmp[x] = (VectorClass().load_a(&lh_left[x*VCSIZE]) * VectorClass().load_a(&lh_right[x*VCSIZE]));
                }
                // compute dot-product with inv_eigenvector
                for (i = 0; i < nstates; i+=VCSIZE) {
                    for (j = 0; j < VCSIZE; j++) {
                        res[j] = vc_partial_lh_tmp[0] * this_inv_evec[(i+j)*nstates/VCSIZE];
                    }
                    for (x = 1; x < nstates/VCSIZE; x++)
                        for (j = 0; j < VCSIZE; j++) {
                            res[j] = mul_add(vc_partial_lh_tmp[x], this_inv_evec[(i+j)*nstates/VCSIZE+x], res[j]);
                        }
                    horizontal_add(res).store_a(&partial_lh[i]);
                }
                lh_left += nstates;
                lh_right += nstates;
                partial_lh += nstates;
            }
        }
        aligned_free(lh_left_ptr);
        aligned_free(lh_right_ptr);
        aligned_free(partial_lh_right);
        aligned_free(partial_lh_left);
    } else if (left->node->isLeaf() && !right->node->isLeaf()) {
        // special treatment to TIP-INTERNAL NODE case
        // only take scale_num from the right subtree
        memcpy(dad_branch->scale_num, right->scale_num, nptn * sizeof(UBYTE));
        // pre compute information for left tip
        double *partial_lh_left = aligned_alloc<double>((aln->STATE_UNKNOWN+1)*block);
        vector<int>::iterator it;
        for (it = aln->seq_states[left->node->id].begin(); it != aln->seq_states[left->node->id].end(); it++) {
            int state = (*it);
            VectorClass vc_partial_lh_tmp[nstates/VCSIZE];
            VectorClass vleft[VCSIZE];
            for (c = 0; c < ncat; c++) {
                size_t addrtip = state*block+c*nstates;
                for (i = 0; i < nstates/VCSIZE; i++)
                    vc_partial_lh_tmp[i].load_a(&tip_partial_lh[addrtip+i*VCSIZE]);
                for (x = 0; x < nstates; x+=VCSIZE) {
                    size_t addr = (c*nstates+x)*nstates/VCSIZE;
                    for (j = 0; j < VCSIZE; j++)
                        vleft[j] = eleft[addr+j*nstates/VCSIZE] * vc_partial_lh_tmp[0];
                    for (i = 1; i < nstates/VCSIZE; i++) {
                        for (j = 0; j < VCSIZE; j++)
                            vleft[j] = mul_add(eleft[addr+j*nstates/VCSIZE+i], vc_partial_lh_tmp[i], vleft[j]);
                    }
                    horizontal_add(vleft).store_a(&partial_lh_left[addrtip+x]);
                }
            }
        }
        // unknown/gap state contributes likelihood 1 for every state
        size_t addr_unknown = aln->STATE_UNKNOWN * block;
        for (x = 0; x < block; x++) {
            partial_lh_left[addr_unknown+x] = 1.0;
        }
        // assign pointers for partial_lh_left
        double **lh_left_ptr = aligned_alloc<double*>(nptn);
        for (ptn = 0; ptn < orig_ntn; ptn++) {
            lh_left_ptr[ptn] = &partial_lh_left[block * (aln->at(ptn))[left->node->id]];
        }
        for (ptn = orig_ntn; ptn < nptn; ptn++) {
            lh_left_ptr[ptn] = &partial_lh_left[block * model_factory->unobserved_ptns[ptn-orig_ntn]];
        }
        double sum_scale = 0.0;
        VectorClass vc_lh_right[nstates/VCSIZE];
        VectorClass vc_partial_lh_tmp[nstates/VCSIZE];
        VectorClass res[VCSIZE];
        VectorClass vc_max; // maximum of partial likelihood, for scaling check
        VectorClass vright[VCSIZE];
#ifdef _OPENMP
#pragma omp parallel for reduction(+: sum_scale) private (ptn, c, x, i, j, vc_lh_right, vc_partial_lh_tmp, res, vc_max, vright)
#endif
        for (ptn = 0; ptn < nptn; ptn++) {
            double *partial_lh = dad_branch->partial_lh + ptn*block;
            double *partial_lh_right = right->partial_lh + ptn*block;
            double *lh_left = lh_left_ptr[ptn];
            vc_max = 0.0;
            for (c = 0; c < ncat; c++) {
                // compute real partial likelihood vector
                VectorClass *this_inv_evec = &vc_inv_evec[c*nstatesqr/VCSIZE];
                for (i = 0; i < nstates/VCSIZE; i++)
                    vc_lh_right[i].load_a(&partial_lh_right[i*VCSIZE]);
                for (x = 0; x < nstates/VCSIZE; x++) {
                    size_t addr = c*nstatesqr/VCSIZE+x*nstates;
                    for (j = 0; j < VCSIZE; j++) {
                        vright[j] = eright[addr+nstates*j/VCSIZE] * vc_lh_right[0];
                    }
                    for (i = 1; i < nstates/VCSIZE; i++)
                        for (j = 0; j < VCSIZE; j++) {
                            vright[j] = mul_add(eright[addr+i+nstates*j/VCSIZE], vc_lh_right[i], vright[j]);
                        }
                    vc_partial_lh_tmp[x] = VectorClass().load_a(&lh_left[x*VCSIZE])
                            * horizontal_add(vright);
                }
                // compute dot-product with inv_eigenvector
                for (i = 0; i < nstates; i+=VCSIZE) {
                    for (j = 0; j < VCSIZE; j++) {
                        res[j] = vc_partial_lh_tmp[0] * this_inv_evec[(i+j)*nstates/VCSIZE];
                    }
                    for (x = 1; x < nstates/VCSIZE; x++) {
                        for (j = 0; j < VCSIZE; j++) {
                            res[j] = mul_add(vc_partial_lh_tmp[x], this_inv_evec[(i+j)*nstates/VCSIZE+x], res[j]);
                        }
                    }
                    VectorClass sum_res = horizontal_add(res);
                    sum_res.store_a(&partial_lh[i]);
                    vc_max = max(vc_max, abs(sum_res)); // take the maximum for scaling check
                }
                lh_left += nstates;
                partial_lh_right += nstates;
                partial_lh += nstates;
            }
            // check if one should scale partial likelihoods
            double lh_max = horizontal_max(vc_max);
            if (lh_max < SCALING_THRESHOLD) {
                // now do the likelihood scaling
                partial_lh -= block; // revert its pointer
                VectorClass scale_thres(SCALING_THRESHOLD_INVER);
                for (i = 0; i < block; i+=VCSIZE) {
                    (VectorClass().load_a(&partial_lh[i]) * scale_thres).store_a(&partial_lh[i]);
                }
                // unobserved const pattern will never have underflow
                sum_scale += LOG_SCALING_THRESHOLD * ptn_freq[ptn];
                dad_branch->scale_num[ptn] += 1;
                partial_lh += block; // increase the pointer again
            }
        }
        dad_branch->lh_scale_factor += sum_scale;
        aligned_free(lh_left_ptr);
        aligned_free(partial_lh_left);
    } else {
        // both left and right are internal node
        double sum_scale = 0.0;
        VectorClass vc_max; // maximum of partial likelihood, for scaling check
        VectorClass vc_partial_lh_tmp[nstates/VCSIZE];
        VectorClass vc_lh_left[nstates/VCSIZE], vc_lh_right[nstates/VCSIZE];
        VectorClass res[VCSIZE];
        VectorClass vleft[VCSIZE], vright[VCSIZE];
#ifdef _OPENMP
#pragma omp parallel for reduction (+: sum_scale) private(ptn, c, x, i, j, vc_max, vc_partial_lh_tmp, vc_lh_left, vc_lh_right, res, vleft, vright)
#endif
        for (ptn = 0; ptn < nptn; ptn++) {
            double *partial_lh = dad_branch->partial_lh + ptn*block;
            double *partial_lh_left = left->partial_lh + ptn*block;
            double *partial_lh_right = right->partial_lh + ptn*block;
            // combine scale counts from both subtrees
            dad_branch->scale_num[ptn] = left->scale_num[ptn] + right->scale_num[ptn];
            vc_max = 0.0;
            for (c = 0; c < ncat; c++) {
                // compute real partial likelihood vector
                VectorClass *this_inv_evec = &vc_inv_evec[c*nstatesqr/VCSIZE];
                for (i = 0; i < nstates/VCSIZE; i++) {
                    vc_lh_left[i].load_a(&partial_lh_left[i*VCSIZE]);
                    vc_lh_right[i].load_a(&partial_lh_right[i*VCSIZE]);
                }
                for (x = 0; x < nstates/VCSIZE; x++) {
                    size_t addr = c*nstatesqr/VCSIZE+x*nstates;
                    for (j = 0; j < VCSIZE; j++) {
                        size_t addr_com = addr+j*nstates/VCSIZE;
                        vleft[j] = eleft[addr_com] * vc_lh_left[0];
                        vright[j] = eright[addr_com] * vc_lh_right[0];
                    }
                    for (i = 1; i < nstates/VCSIZE; i++) {
                        for (j = 0; j < VCSIZE; j++) {
                            size_t addr_com = addr+i+j*nstates/VCSIZE;
                            vleft[j] = mul_add(eleft[addr_com], vc_lh_left[i], vleft[j]);
                            vright[j] = mul_add(eright[addr_com], vc_lh_right[i], vright[j]);
                        }
                    }
                    vc_partial_lh_tmp[x] = horizontal_add(vleft) * horizontal_add(vright);
                }
                // compute dot-product with inv_eigenvector
                for (i = 0; i < nstates; i+=VCSIZE) {
                    for (j = 0; j < VCSIZE; j++) {
                        res[j] = vc_partial_lh_tmp[0] * this_inv_evec[(i+j)*nstates/VCSIZE];
                    }
                    for (x = 1; x < nstates/VCSIZE; x++)
                        for (j = 0; j < VCSIZE; j++)
                            res[j] = mul_add(vc_partial_lh_tmp[x], this_inv_evec[(i+j)*nstates/VCSIZE+x], res[j]);
                    VectorClass sum_res = horizontal_add(res);
                    sum_res.store_a(&partial_lh[i]);
                    vc_max = max(vc_max, abs(sum_res)); // take the maximum for scaling check
                }
                partial_lh += nstates;
                partial_lh_left += nstates;
                partial_lh_right += nstates;
            }
            // check if one should scale partial likelihoods
            double lh_max = horizontal_max(vc_max);
            if (lh_max < SCALING_THRESHOLD) {
                // now do the likelihood scaling
                partial_lh -= block; // revert its pointer
                VectorClass scale_thres(SCALING_THRESHOLD_INVER);
                for (i = 0; i < block; i+=VCSIZE) {
                    (VectorClass().load_a(&partial_lh[i]) * scale_thres).store_a(&partial_lh[i]);
                }
                // unobserved const pattern will never have underflow
                sum_scale += LOG_SCALING_THRESHOLD * ptn_freq[ptn];
                dad_branch->scale_num[ptn] += 1;
                partial_lh += block; // increase the pointer again
            }
        }
        dad_branch->lh_scale_factor += sum_scale;
    }
    aligned_free(eright);
    aligned_free(eleft);
    aligned_free(vc_inv_evec);
}
template <class VectorClass, const int VCSIZE, const int nstates>
void PhyloTree::computeMixrateLikelihoodDervEigenSIMD(PhyloNeighbor *dad_branch, PhyloNode *dad, double &df, double &ddf) {
PhyloNode *node = (PhyloNode*) dad_branch->node;
PhyloNeighbor *node_branch = (PhyloNeighbor*) node->findNeighbor(dad);
if (!central_partial_lh)
initializeAllPartialLh();
if (node->isLeaf()) {
PhyloNode *tmp_node = dad;
dad = node;
node = tmp_node;
PhyloNeighbor *tmp_nei = dad_branch;
dad_branch = node_branch;
node_branch = tmp_nei;
}
if ((dad_branch->partial_lh_computed & 1) == 0)
computeMixratePartialLikelihoodEigenSIMD<VectorClass, VCSIZE, nstates>(dad_branch, dad);
if ((node_branch->partial_lh_computed & 1) == 0)
computeMixratePartialLikelihoodEigenSIMD<VectorClass, VCSIZE, nstates>(node_branch, node);
df = ddf = 0.0;
size_t ncat = site_rate->getNRate();
size_t block = ncat * nstates;
size_t ptn; // for big data size > 4GB memory required
size_t c, i, j;
size_t orig_nptn = aln->size();
size_t nptn = aln->size()+model_factory->unobserved_ptns.size();
size_t maxptn = ((nptn+VCSIZE-1)/VCSIZE)*VCSIZE;
maxptn = max(maxptn, aln->size()+((model_factory->unobserved_ptns.size()+VCSIZE-1)/VCSIZE)*VCSIZE);
double *eval = model->getEigenvalues();
ASSERT(eval);
VectorClass *vc_val0 = (VectorClass*)aligned_alloc<double>(block);
VectorClass *vc_val1 = (VectorClass*)aligned_alloc<double>(block);
VectorClass *vc_val2 = (VectorClass*)aligned_alloc<double>(block);
VectorClass vc_len = dad_branch->length;
for (c = 0; c < ncat; c++) {
VectorClass vc_rate = site_rate->getRate(c);
VectorClass vc_prop = site_rate->getProp(c);
for (i = 0; i < nstates/VCSIZE; i++) {
VectorClass cof = VectorClass().load_a(&eval[c*nstates+i*VCSIZE]) * vc_rate;
VectorClass val = exp(cof*vc_len) * vc_prop;
VectorClass val1_ = cof*val;
vc_val0[c*nstates/VCSIZE+i] = val;
vc_val1[c*nstates/VCSIZE+i] = val1_;
vc_val2[c*nstates/VCSIZE+i] = cof*val1_;
}
}
ASSERT(theta_all);
if (!theta_computed) {
theta_computed = true;
// precompute theta for fast branch length optimization
if (dad->isLeaf()) {
// special treatment for TIP-INTERNAL NODE case
#ifdef _OPENMP
#pragma omp parallel for private(ptn, i)
#endif
for (ptn = 0; ptn < orig_nptn; ptn++) {
double *partial_lh_dad = dad_branch->partial_lh + ptn*block;
double *theta = theta_all + ptn*block;
double *lh_dad = &tip_partial_lh[(aln->at(ptn))[dad->id] * nstates * ncat];
for (i = 0; i < block; i+=VCSIZE) {
(VectorClass().load_a(&lh_dad[i]) * VectorClass().load_a(&partial_lh_dad[i])).store_a(&theta[i]);
}
}
// ascertainment bias correction
for (ptn = orig_nptn; ptn < nptn; ptn++) {
double *partial_lh_dad = dad_branch->partial_lh + ptn*block;
double *theta = theta_all + ptn*block;
double *lh_dad = &tip_partial_lh[model_factory->unobserved_ptns[ptn-orig_nptn] * nstates * ncat];
for (i = 0; i < block; i+=VCSIZE) {
(VectorClass().load_a(&lh_dad[i]) * VectorClass().load_a(&partial_lh_dad[i])).store_a(&theta[i]);
}
}
} else {
// both dad and node are internal nodes
double *partial_lh_node = node_branch->partial_lh;
double *partial_lh_dad = dad_branch->partial_lh;
size_t all_entries = nptn*block;
#ifdef _OPENMP
#pragma omp parallel for private(i)
#endif
for (i = 0; i < all_entries; i+=VCSIZE) {
(VectorClass().load_a(&partial_lh_node[i]) * VectorClass().load_a(&partial_lh_dad[i]))
.store_a(&theta_all[i]);
}
}
if (nptn < maxptn) {
// copy dummy values
for (ptn = nptn; ptn < maxptn; ptn++)
memcpy(&theta_all[ptn*block], theta_all, block*sizeof(double));
}
}
VectorClass vc_ptn[VCSIZE], vc_df[VCSIZE], vc_ddf[VCSIZE], vc_theta[VCSIZE];
VectorClass vc_unit = 1.0;
VectorClass vc_freq;
VectorClass df_final = 0.0, ddf_final = 0.0;
// these stores values of 2 consecutive patterns
VectorClass lh_ptn, df_ptn, ddf_ptn, inv_lh_ptn;
// perform 2 sites at the same time for SSE/AVX efficiency
#ifdef _OPENMP
#pragma omp parallel private (ptn, i, j, vc_freq, vc_ptn, vc_df, vc_ddf, vc_theta, inv_lh_ptn, lh_ptn, df_ptn, ddf_ptn)
{
VectorClass df_final_th = 0.0;
VectorClass ddf_final_th = 0.0;
#pragma omp for nowait
#endif
for (ptn = 0; ptn < orig_nptn; ptn+=VCSIZE) {
double *theta = theta_all + ptn*block;
// initialization
for (i = 0; i < VCSIZE; i++) {
vc_theta[i].load_a(theta+i*block);
vc_ptn[i] = vc_val0[0] * vc_theta[i];
vc_df[i] = vc_val1[0] * vc_theta[i];
vc_ddf[i] = vc_val2[0] * vc_theta[i];
}
for (i = 1; i < block/VCSIZE; i++) {
for (j = 0; j < VCSIZE; j++) {
vc_theta[j].load_a(&theta[i*VCSIZE+j*block]);
vc_ptn[j] = mul_add(vc_theta[j], vc_val0[i], vc_ptn[j]);
vc_df[j] = mul_add(vc_theta[j], vc_val1[i], vc_df[j]);
vc_ddf[j] = mul_add(vc_theta[j], vc_val2[i], vc_ddf[j]);
}
}
lh_ptn = horizontal_add(vc_ptn) + VectorClass().load_a(&ptn_invar[ptn]);
inv_lh_ptn = vc_unit / abs(lh_ptn);
vc_freq.load_a(&ptn_freq[ptn]);
df_ptn = horizontal_add(vc_df) * inv_lh_ptn;
ddf_ptn = horizontal_add(vc_ddf) * inv_lh_ptn;
ddf_ptn = nmul_add(df_ptn, df_ptn, ddf_ptn);
#ifdef _OPENMP
df_final_th = mul_add(df_ptn, vc_freq, df_final_th);
ddf_final_th = mul_add(ddf_ptn, vc_freq, ddf_final_th);
#else
df_final = mul_add(df_ptn, vc_freq, df_final);
ddf_final = mul_add(ddf_ptn, vc_freq, ddf_final);
#endif
}
#ifdef _OPENMP
#pragma omp critical
{
df_final += df_final_th;
ddf_final += ddf_final_th;
}
}
#endif
df = horizontal_add(df_final);
ddf = horizontal_add(ddf_final);
if (isnan(df) || isinf(df)) {
df = 0.0;
ddf = 0.0;
// outWarning("Numerical instability (some site-likelihood = 0)");
}
// assert(isnormal(tree_lh));
if (orig_nptn < nptn) {
// ascertaiment bias correction
VectorClass lh_final = 0.0;
df_final = 0.0;
ddf_final = 0.0;
lh_ptn = 0.0;
df_ptn = 0.0;
ddf_ptn = 0.0;
double prob_const, df_const, ddf_const;
double *theta = &theta_all[orig_nptn*block];
for (ptn = orig_nptn; ptn < nptn; ptn+=VCSIZE) {
lh_final += lh_ptn;
df_final += df_ptn;
ddf_final += ddf_ptn;
// initialization
for (i = 0; i < VCSIZE; i++) {
vc_theta[i].load_a(theta+i*block);
vc_ptn[i] = vc_val0[0] * vc_theta[i];
vc_df[i] = vc_val1[0] * vc_theta[i];
vc_ddf[i] = vc_val2[0] * vc_theta[i];
}
for (i = 1; i < block/VCSIZE; i++) {
for (j = 0; j < VCSIZE; j++) {
vc_theta[j].load_a(&theta[i*VCSIZE+j*block]);
vc_ptn[j] = mul_add(vc_theta[j], vc_val0[i], vc_ptn[j]);
vc_df[j] = mul_add(vc_theta[j], vc_val1[i], vc_df[j]);
vc_ddf[j] = mul_add(vc_theta[j], vc_val2[i], vc_ddf[j]);
}
}
theta += block*VCSIZE;
// ptn_invar[ptn] is not aligned
lh_ptn = horizontal_add(vc_ptn) + VectorClass().load(&ptn_invar[ptn]);
df_ptn = horizontal_add(vc_df);
ddf_ptn = horizontal_add(vc_ddf);
}
switch ((nptn-orig_nptn) % VCSIZE) {
case 0:
prob_const = horizontal_add(lh_final+lh_ptn);
df_const = horizontal_add(df_final+df_ptn);
ddf_const = horizontal_add(ddf_final+ddf_ptn);
break;
case 1:
prob_const = horizontal_add(lh_final)+lh_ptn[0];
df_const = horizontal_add(df_final)+df_ptn[0];
ddf_const = horizontal_add(ddf_final)+ddf_ptn[0];
break;
case 2:
prob_const = horizontal_add(lh_final)+lh_ptn[0]+lh_ptn[1];
df_const = horizontal_add(df_final)+df_ptn[0]+df_ptn[1];
ddf_const = horizontal_add(ddf_final)+ddf_ptn[0]+ddf_ptn[1];
break;
case 3:
prob_const = horizontal_add(lh_final)+lh_ptn[0]+lh_ptn[1]+lh_ptn[2];
df_const = horizontal_add(df_final)+df_ptn[0]+df_ptn[1]+df_ptn[2];
ddf_const = horizontal_add(ddf_final)+ddf_ptn[0]+ddf_ptn[1]+ddf_ptn[2];
break;
default:
ASSERT(0);
break;
}
prob_const = 1.0 - prob_const;
double df_frac = df_const / prob_const;
double ddf_frac = ddf_const / prob_const;
int nsites = aln->getNSite();
df += nsites * df_frac;
ddf += nsites *(ddf_frac + df_frac*df_frac);
}
aligned_free(vc_val2);
aligned_free(vc_val1);
aligned_free(vc_val0);
}
/**
 * Computes the tree log-likelihood across the branch (dad, dad_branch->node)
 * under the mixture-rate model, using VCSIZE-wide SIMD vectors (VectorClass).
 *
 * Recomputes stale partial likelihoods on either branch end, then combines
 * them per pattern; if unobserved patterns are present (orig_nptn < nptn),
 * an ascertainment-bias correction is applied at the end.
 *
 * @param dad_branch  neighbor of dad pointing to the other branch end
 * @param dad         one end of the branch
 * @return tree log-likelihood (also fills _pattern_lh per pattern)
 */
template <class VectorClass, const int VCSIZE, const int nstates>
double PhyloTree::computeMixrateLikelihoodBranchEigenSIMD(PhyloNeighbor *dad_branch, PhyloNode *dad) {
    PhyloNode *node = (PhyloNode*) dad_branch->node;
    PhyloNeighbor *node_branch = (PhyloNeighbor*) node->findNeighbor(dad);
    if (!central_partial_lh)
        initializeAllPartialLh();
    // normalize orientation: if the far end is a leaf, swap so that any
    // leaf end of the branch becomes 'dad' (tip-specific fast path below)
    if (node->isLeaf()) {
        PhyloNode *tmp_node = dad;
        dad = node;
        node = tmp_node;
        PhyloNeighbor *tmp_nei = dad_branch;
        dad_branch = node_branch;
        node_branch = tmp_nei;
    }
    // bring both partial-likelihood vectors up to date before combining them
    if ((dad_branch->partial_lh_computed & 1) == 0)
        computeMixratePartialLikelihoodEigenSIMD<VectorClass, VCSIZE, nstates>(dad_branch, dad);
    if ((node_branch->partial_lh_computed & 1) == 0)
        computeMixratePartialLikelihoodEigenSIMD<VectorClass, VCSIZE, nstates>(node_branch, node);
    double tree_lh = node_branch->lh_scale_factor + dad_branch->lh_scale_factor;
    size_t ncat = site_rate->getNRate();
    size_t block = ncat * nstates;  // doubles per pattern (all rate categories)
    size_t ptn; // for big data size > 4GB memory required
    size_t c, i, j;
    size_t orig_nptn = aln->size();
    // patterns including unobserved ones (used for ascertainment bias correction)
    size_t nptn = aln->size()+model_factory->unobserved_ptns.size();
    // pattern count rounded up to a multiple of VCSIZE so vector loops can overrun safely
    size_t maxptn = ((nptn+VCSIZE-1)/VCSIZE)*VCSIZE;
    maxptn = max(maxptn, aln->size()+((model_factory->unobserved_ptns.size()+VCSIZE-1)/VCSIZE)*VCSIZE);
    double *eval = model->getEigenvalues();
    ASSERT(eval);
    // precompute exp(eigenvalue * rate * branch_length) * category_proportion,
    // packed into SIMD vectors, one entry per (category, state-chunk)
    VectorClass *vc_val = aligned_alloc<VectorClass>(block/VCSIZE);
    for (c = 0; c < ncat; c++) {
        double len = site_rate->getRate(c)*dad_branch->length;
        VectorClass vc_len(len);
        VectorClass vc_prop(site_rate->getProp(c));
        for (i = 0; i < nstates/VCSIZE; i++) {
            // eval is not aligned!
            // NOTE(review): the comment above says unaligned, yet load_a performs
            // an aligned load — confirm eval's allocation alignment.
            vc_val[c*nstates/VCSIZE+i] = exp(VectorClass().load_a(&eval[c*nstates+i*VCSIZE]) * vc_len) * vc_prop;
        }
    }
    double prob_const = 0.0;
    if (dad->isLeaf()) {
        // special treatment for TIP-INTERNAL NODE case
        // VectorClass vc_tip_partial_lh[nstates];
        // VectorClass vc_partial_lh_dad[VCSIZE];
        VectorClass vc_ptn[VCSIZE];
        VectorClass lh_final(0.0), vc_freq;
        VectorClass lh_ptn; // store likelihoods of VCSIZE consecutive patterns
        // precompute information from one tip: for every possible tip state,
        // multiply its tip partial likelihood by the transition vector once
        double *partial_lh_node = aligned_alloc<double>((aln->STATE_UNKNOWN+1)*block);
        IntVector states_dad = aln->seq_states[dad->id];
        states_dad.push_back(aln->STATE_UNKNOWN);
        for (IntVector::iterator it = states_dad.begin(); it != states_dad.end(); it++) {
            double *lh_node = partial_lh_node + (*it)*block;
            double *lh_tip = tip_partial_lh + (*it)*block;
            for (i = 0; i < block; i+=VCSIZE)
                (vc_val[i/VCSIZE]*VectorClass().load_a(&lh_tip[i])).store_a(&lh_node[i]);
        }
        // now do the real computation
        // double **lh_states_dad = aligned_alloc<double*>(maxptn);
        // for (ptn = 0; ptn < orig_nptn; ptn++)
        // lh_states_dad[ptn] = &tip_partial_lh[(aln->at(ptn))[dad->id] * nstates * ncat];
        // for (ptn = orig_nptn; ptn < nptn; ptn++)
        // lh_states_dad[ptn] = &tip_partial_lh[model_factory->unobserved_ptns[ptn-orig_nptn] * nstates * ncat];
        // // initialize beyond #patterns for efficiency
        // for (ptn = nptn; ptn < maxptn; ptn++)
        // lh_states_dad[ptn] = &tip_partial_lh[aln->STATE_UNKNOWN * nstates * ncat];
        // per-pattern tip state, padded with STATE_UNKNOWN up to maxptn
        int *ptn_states_dad = aligned_alloc<int>(maxptn);
        for (ptn = 0; ptn < orig_nptn; ptn++)
            ptn_states_dad[ptn] = (aln->at(ptn))[dad->id];
        for (ptn = orig_nptn; ptn < nptn; ptn++)
            ptn_states_dad[ptn] = model_factory->unobserved_ptns[ptn-orig_nptn];
        // initialize beyond #patterns for efficiency
        for (ptn = nptn; ptn < maxptn; ptn++)
            ptn_states_dad[ptn] = aln->STATE_UNKNOWN;
        // copy dummy values because VectorClass will access beyond nptn
        for (ptn = nptn; ptn < maxptn; ptn++)
            memcpy(&dad_branch->partial_lh[ptn*block], dad_branch->partial_lh, block*sizeof(double));
#ifdef _OPENMP
#pragma omp parallel private(ptn, i, j, vc_ptn, vc_freq, lh_ptn)
    {
        // per-thread accumulator, merged into lh_final in the critical section below
        VectorClass lh_final_th = 0.0;
#pragma omp for nowait
#endif
        // main loop over all patterns with a step size of VCSIZE
        for (ptn = 0; ptn < orig_nptn; ptn+=VCSIZE) {
            for (j = 0; j < VCSIZE; j++) {
                vc_ptn[j] = 0.0;
                double *partial_lh_dad = dad_branch->partial_lh + (ptn+j)*block;
                int state_dad = ptn_states_dad[ptn+j];
                double *lh_node = &partial_lh_node[state_dad*block];
                // dot product of precomputed tip vector and internal partial lh
                for (i = 0; i < block; i+=VCSIZE) {
                    vc_ptn[j] = mul_add(VectorClass().load_a(&lh_node[i]),
                        VectorClass().load_a(&partial_lh_dad[i]), vc_ptn[j]);
                }
            }
            vc_freq.load_a(&ptn_freq[ptn]);
            lh_ptn = horizontal_add(vc_ptn) + VectorClass().load_a(&ptn_invar[ptn]);
            lh_ptn = log(abs(lh_ptn));
            lh_ptn.store_a(&_pattern_lh[ptn]);
            // multiply with pattern frequency
#ifdef _OPENMP
            lh_final_th = mul_add(lh_ptn, vc_freq, lh_final_th);
#else
            lh_final = mul_add(lh_ptn, vc_freq, lh_final);
#endif
        }
#ifdef _OPENMP
#pragma omp critical
        {
            lh_final += lh_final_th;
        }
    }
#endif
        tree_lh += horizontal_add(lh_final);
        ASSERT(!isnan(tree_lh) && !isinf(tree_lh));
        // ascertainment bias correction: accumulate raw (non-log) likelihoods
        // of the unobserved patterns into prob_const
        if (orig_nptn < nptn) {
            lh_final = 0.0;
            lh_ptn = 0.0;
            for (ptn = orig_nptn; ptn < nptn; ptn+=VCSIZE) {
                // lh_ptn from the previous iteration is folded in here; the
                // final (possibly partial) vector is handled by the switch below
                lh_final += lh_ptn;
                for (j = 0; j < VCSIZE; j++) {
                    vc_ptn[j] = 0.0;
                    double *partial_lh_dad = &dad_branch->partial_lh[(ptn+j)*block];
                    int state_dad = ptn_states_dad[ptn+j];
                    double *lh_node = &partial_lh_node[state_dad*block];
                    for (i = 0; i < block; i+=VCSIZE) {
                        vc_ptn[j] = mul_add(VectorClass().load_a(&lh_node[i]),
                            VectorClass().load_a(&partial_lh_dad[i]), vc_ptn[j]);
                    }
                }
                // bugfix 2016-01-21, prob_const can be rescaled
                for (j = 0; j < VCSIZE; j++)
                    if (dad_branch->scale_num[ptn+j] >= 1)
                        vc_ptn[j] = vc_ptn[j] * SCALING_THRESHOLD;
                // ptn_invar[ptn] is not aligned
                lh_ptn = horizontal_add(vc_ptn) + VectorClass().load(&ptn_invar[ptn]);
            }
            // add only the valid lanes of the last vector (remainder handling)
            switch ((nptn-orig_nptn)%VCSIZE) {
            case 0: prob_const = horizontal_add(lh_final+lh_ptn); break;
            case 1: prob_const = horizontal_add(lh_final)+lh_ptn[0]; break;
            case 2: prob_const = horizontal_add(lh_final)+lh_ptn[0]+lh_ptn[1]; break;
            case 3: prob_const = horizontal_add(lh_final)+lh_ptn[0]+lh_ptn[1]+lh_ptn[2]; break;
            default: ASSERT(0); break;
            }
        }
        // aligned_free(lh_states_dad);
        aligned_free(ptn_states_dad);
        aligned_free(partial_lh_node);
    } else {
        // both dad and node are internal nodes
        VectorClass vc_partial_lh_node[VCSIZE];
        VectorClass vc_partial_lh_dad[VCSIZE], vc_ptn[VCSIZE];
        VectorClass lh_final(0.0), vc_freq;
        VectorClass lh_ptn;
        // copy dummy values because VectorClass will access beyond nptn
        for (ptn = nptn; ptn < maxptn; ptn++) {
            memcpy(&dad_branch->partial_lh[ptn*block], dad_branch->partial_lh, block*sizeof(double));
            memcpy(&node_branch->partial_lh[ptn*block], node_branch->partial_lh, block*sizeof(double));
        }
#ifdef _OPENMP
#pragma omp parallel private(ptn, i, j, vc_partial_lh_node, vc_partial_lh_dad, vc_ptn, vc_freq, lh_ptn)
    {
        VectorClass lh_final_th = 0.0;
#pragma omp for nowait
#endif
        for (ptn = 0; ptn < orig_nptn; ptn+=VCSIZE) {
            double *partial_lh_dad = dad_branch->partial_lh + ptn*block;
            double *partial_lh_node = node_branch->partial_lh + ptn*block;
            for (j = 0; j < VCSIZE; j++)
                vc_ptn[j] = 0.0;
            // triple product: transition vector * node partial lh * dad partial lh
            for (i = 0; i < block; i+=VCSIZE) {
                for (j = 0; j < VCSIZE; j++) {
                    vc_partial_lh_node[j].load_a(&partial_lh_node[i+j*block]);
                    vc_partial_lh_dad[j].load_a(&partial_lh_dad[i+j*block]);
                    vc_ptn[j] = mul_add(vc_val[i/VCSIZE] * vc_partial_lh_node[j], vc_partial_lh_dad[j], vc_ptn[j]);
                }
            }
            vc_freq.load_a(&ptn_freq[ptn]);
            lh_ptn = horizontal_add(vc_ptn) + VectorClass().load_a(&ptn_invar[ptn]);
            lh_ptn = log(abs(lh_ptn));
            lh_ptn.store_a(&_pattern_lh[ptn]);
#ifdef _OPENMP
            lh_final_th = mul_add(lh_ptn, vc_freq, lh_final_th);
#else
            lh_final = mul_add(lh_ptn, vc_freq, lh_final);
#endif
        }
#ifdef _OPENMP
#pragma omp critical
        {
            lh_final += lh_final_th;
        }
    }
#endif
        tree_lh += horizontal_add(lh_final);
        ASSERT(!isnan(tree_lh) && !isinf(tree_lh));
        if (orig_nptn < nptn) {
            // ascertainment bias correction (raw likelihoods of unobserved patterns)
            lh_final = 0.0;
            lh_ptn = 0.0;
            double *partial_lh_node = &node_branch->partial_lh[orig_nptn*block];
            double *partial_lh_dad = &dad_branch->partial_lh[orig_nptn*block];
            for (ptn = orig_nptn; ptn < nptn; ptn+=VCSIZE) {
                lh_final += lh_ptn;
                for (j = 0; j < VCSIZE; j++)
                    vc_ptn[j] = 0.0;
                for (i = 0; i < block; i+=VCSIZE) {
                    for (j = 0; j < VCSIZE; j++) {
                        vc_partial_lh_node[j].load_a(&partial_lh_node[i+j*block]);
                        vc_partial_lh_dad[j].load_a(&partial_lh_dad[i+j*block]);
                        vc_ptn[j] = mul_add(vc_val[i/VCSIZE] * vc_partial_lh_node[j], vc_partial_lh_dad[j], vc_ptn[j]);
                    }
                }
                // bugfix 2016-01-21, prob_const can be rescaled
                for (j = 0; j < VCSIZE; j++)
                    if (dad_branch->scale_num[ptn+j] + node_branch->scale_num[ptn+j] >= 1)
                        vc_ptn[j] = vc_ptn[j] * SCALING_THRESHOLD;
                // ptn_invar[ptn] is not aligned
                lh_ptn = horizontal_add(vc_ptn) + VectorClass().load(&ptn_invar[ptn]);
                partial_lh_node += block*VCSIZE;
                partial_lh_dad += block*VCSIZE;
            }
            // remainder handling for the last, possibly partial, vector
            switch ((nptn-orig_nptn)%VCSIZE) {
            case 0: prob_const = horizontal_add(lh_final+lh_ptn); break;
            case 1: prob_const = horizontal_add(lh_final)+lh_ptn[0]; break;
            case 2: prob_const = horizontal_add(lh_final)+lh_ptn[0]+lh_ptn[1]; break;
            case 3: prob_const = horizontal_add(lh_final)+lh_ptn[0]+lh_ptn[1]+lh_ptn[2]; break;
            default: ASSERT(0); break;
            }
        }
    }
    if (orig_nptn < nptn) {
        // ascertainment bias correction: subtract log(1 - prob_unobserved)
        // from every pattern and from the total
        prob_const = log(1.0 - prob_const);
        for (ptn = 0; ptn < orig_nptn; ptn++)
            _pattern_lh[ptn] -= prob_const;
        tree_lh -= aln->getNSite()*prob_const;
    }
    aligned_free(vc_val);
    return tree_lh;
}
/**
 * Computes the tree log-likelihood for the current branch from the
 * precomputed theta buffer (theta_all) under the mixture-rate model,
 * vectorized with VCSIZE-wide SIMD vectors.
 *
 * Requires theta_all to be filled and theta_computed set (asserted).
 * Fills _pattern_lh per pattern; applies numerical-underflow recovery and,
 * when unobserved patterns exist, ascertainment-bias correction.
 *
 * @return tree log-likelihood
 */
template <class VectorClass, const int VCSIZE, const int nstates>
double PhyloTree::computeMixrateLikelihoodFromBufferEigenSIMD() {
    ASSERT(theta_all && theta_computed);
    double tree_lh = current_it->lh_scale_factor + current_it_back->lh_scale_factor;
    size_t ncat = site_rate->getNRate();
    size_t block = ncat * nstates;  // doubles per pattern
    size_t ptn; // for big data size > 4GB memory required
    size_t c, i, j;
    size_t orig_nptn = aln->size();
    size_t nptn = aln->size()+model_factory->unobserved_ptns.size();
    // size_t maxptn = ((nptn+VCSIZE-1)/VCSIZE)*VCSIZE;
    double *eval = model->getEigenvalues();
    ASSERT(eval);
    // precompute exp(eigenvalue*rate*branch_length)*category_proportion per
    // (category, state-chunk); allocated as doubles and viewed as vectors
    VectorClass *vc_val0 = (VectorClass*)aligned_alloc<double>(block);
    VectorClass vc_len = current_it->length;
    for (c = 0; c < ncat; c++) {
        VectorClass vc_rate = site_rate->getRate(c);
        VectorClass vc_prop = site_rate->getProp(c);
        for (i = 0; i < nstates/VCSIZE; i++) {
            VectorClass cof = VectorClass().load_a(&eval[c*nstates+i*VCSIZE]) * vc_rate;
            VectorClass val = exp(cof*vc_len) * vc_prop;
            vc_val0[c*nstates/VCSIZE+i] = val;
        }
    }
    VectorClass vc_ptn[VCSIZE];
    VectorClass vc_freq;
    VectorClass lh_final = 0.0;
    // holds the likelihoods of VCSIZE consecutive patterns
    VectorClass lh_ptn;
    // process VCSIZE sites at the same time for SSE/AVX efficiency
#ifdef _OPENMP
#pragma omp parallel private (ptn, i, j, vc_freq, vc_ptn, lh_ptn)
    {
        // per-thread accumulator, merged in the critical section below
        VectorClass lh_final_th = 0.0;
#pragma omp for nowait
#endif
    for (ptn = 0; ptn < orig_nptn; ptn+=VCSIZE) {
        double *theta = theta_all + ptn*block;
        // initialization
        for (i = 0; i < VCSIZE; i++) {
            vc_ptn[i] = vc_val0[0] * VectorClass().load_a(theta+i*block);
        }
        // dot product of the theta buffer with the transition vector
        for (i = 1; i < block/VCSIZE; i++) {
            for (j = 0; j < VCSIZE; j++) {
                vc_ptn[j] = mul_add(VectorClass().load_a(&theta[i*VCSIZE+j*block]), vc_val0[i], vc_ptn[j]);
            }
        }
        lh_ptn = horizontal_add(vc_ptn) + VectorClass().load_a(&ptn_invar[ptn]);
        lh_ptn = log(abs(lh_ptn));
        lh_ptn.store_a(&_pattern_lh[ptn]);
        vc_freq.load_a(&ptn_freq[ptn]);
#ifdef _OPENMP
        lh_final_th = mul_add(lh_ptn, vc_freq, lh_final_th);
#else
        lh_final = mul_add(lh_ptn, vc_freq, lh_final);
#endif
    }
#ifdef _OPENMP
#pragma omp critical
    {
        lh_final += lh_final_th;
    }
    }
#endif
    tree_lh += horizontal_add(lh_final);
    if (isnan(tree_lh) || isinf(tree_lh)) {
        // numerical underflow recovery: report up to 10 offending sites,
        // clamp their pattern log-likelihoods, and re-accumulate the total
        cout << "WARNING: Numerical underflow caused by alignment sites";
        i = aln->getNSite();
        for (j = 0, c = 0; j < i; j++) {
            ptn = aln->getPatternID(j);
            if (isnan(_pattern_lh[ptn]) || isinf(_pattern_lh[ptn])) {
                cout << " " << j+1;
                c++;
                if (c >= 10) {
                    cout << " ...";
                    break;
                }
            }
        }
        cout << endl;
        tree_lh = current_it->lh_scale_factor + current_it_back->lh_scale_factor;
        for (ptn = 0; ptn < orig_nptn; ptn++) {
            if (isnan(_pattern_lh[ptn]) || isinf(_pattern_lh[ptn])) {
                _pattern_lh[ptn] = LOG_SCALING_THRESHOLD*4; // log(2^(-1024))
            }
            tree_lh += _pattern_lh[ptn] * ptn_freq[ptn];
        }
    }
    if (orig_nptn < nptn) {
        // ascertainment bias correction
        lh_final = 0.0;
        lh_ptn = 0.0;
        double prob_const;// df_const, ddf_const;
        double *theta = &theta_all[orig_nptn*block];
        // combined scaling counters of the unobserved patterns
        // NOTE(review): the buffer has nstates+VCSIZE entries while
        // nptn-orig_nptn entries are written — confirm the number of
        // unobserved patterns can never exceed nstates+VCSIZE.
        UBYTE sum_scale_num[nstates+VCSIZE];
        memset(sum_scale_num, 0, sizeof(UBYTE)*(nstates+VCSIZE));
        if (current_it->node->isLeaf())
            memcpy(sum_scale_num, current_it_back->scale_num+orig_nptn, sizeof(UBYTE)*(nptn-orig_nptn));
        else if (current_it_back->node->isLeaf())
            memcpy(sum_scale_num, current_it->scale_num+orig_nptn, sizeof(UBYTE)*(nptn-orig_nptn));
        else {
            for (ptn = orig_nptn; ptn < nptn; ptn++)
                sum_scale_num[ptn-orig_nptn] = current_it->scale_num[ptn] + current_it_back->scale_num[ptn];
        }
        for (ptn = orig_nptn; ptn < nptn; ptn+=VCSIZE) {
            // lh_ptn of the previous iteration is folded in here; the final
            // (possibly partial) vector is handled by the switch below
            lh_final += lh_ptn;
            // initialization
            for (i = 0; i < VCSIZE; i++) {
                vc_ptn[i] = vc_val0[0] * VectorClass().load_a(theta+i*block);
            }
            for (i = 1; i < block/VCSIZE; i++) {
                for (j = 0; j < VCSIZE; j++) {
                    vc_ptn[j] = mul_add(VectorClass().load_a(&theta[i*VCSIZE+j*block]), vc_val0[i], vc_ptn[j]);
                }
            }
            theta += block*VCSIZE;
            // bugfix 2016-01-21, prob_const can be rescaled
            for (j = 0; j < VCSIZE; j++)
                if (sum_scale_num[ptn+j-orig_nptn] >= 1)
                    vc_ptn[j] = vc_ptn[j] * SCALING_THRESHOLD;
            // ptn_invar[ptn] is not aligned
            lh_ptn = horizontal_add(vc_ptn) + VectorClass().load(&ptn_invar[ptn]);
        }
        // add only the valid lanes of the last vector (remainder handling)
        switch ((nptn-orig_nptn) % VCSIZE) {
        case 0:
            prob_const = horizontal_add(lh_final+lh_ptn);
            break;
        case 1:
            prob_const = horizontal_add(lh_final)+lh_ptn[0];
            break;
        case 2:
            prob_const = horizontal_add(lh_final)+lh_ptn[0]+lh_ptn[1];
            break;
        case 3:
            prob_const = horizontal_add(lh_final)+lh_ptn[0]+lh_ptn[1]+lh_ptn[2];
            break;
        default:
            ASSERT(0);
            break;
        }
        // subtract log(1 - prob_unobserved) from every pattern and the total
        prob_const = log(1.0 - prob_const);
        tree_lh -= aln->getNSite() * prob_const;
        for (ptn = 0; ptn < orig_nptn; ptn++)
            _pattern_lh[ptn] -= prob_const;
    }
    aligned_free(vc_val0);
    return tree_lh;
}
#endif /* PHYLOKERNELMIXRATE_H_ */
|
charge_deposition.h | #ifndef XFIELDS_CHARGE_DEPOSITION_H
#define XFIELDS_CHARGE_DEPOSITION_H
//include_file atomicadd.clh for_context opencl
//include_file atomicadd.h for_context cpu_serial cpu_openmp
/*gpukern*/ void p2m_rectmesh3d(
// INPUTS:
// length of x, y, z arrays
const int nparticles,
// particle positions
/*gpuglmem*/ const double* x,
/*gpuglmem*/ const double* y,
/*gpuglmem*/ const double* z,
// particle weights
/*gpuglmem*/ const double* part_weights,
// mesh origin
const double x0, const double y0, const double z0,
// mesh distances per cell
const double dx, const double dy, const double dz,
// mesh dimension (number of cells)
const int nx, const int ny, const int nz,
// OUTPUTS:
/*gpuglmem*/ double *grid1d
) {
double vol_m1 = 1/(dx*dy*dz);
#pragma omp parallel for //only_for_context cpu_openmp
for (int pidx=0; pidx<nparticles; pidx++){ //vectorize_over pidx nparticles
double pwei = part_weights[pidx];
// indices
int jx = floor((x[pidx] - x0) / dx);
int ix = floor((y[pidx] - y0) / dy);
int kx = floor((z[pidx] - z0) / dz);
// distances
double dxi = x[pidx] - (x0 + jx * dx);
double dyi = y[pidx] - (y0 + ix * dy);
double dzi = z[pidx] - (z0 + kx * dz);
// weights
double wijk = pwei * vol_m1 * (1.-dxi/dx) * (1.-dyi/dy) * (1.-dzi/dz);
double wi1jk = pwei * vol_m1 * (1.-dxi/dx) * (dyi/dy) * (1.-dzi/dz);
double wij1k = pwei * vol_m1 * (dxi/dx) * (1.-dyi/dy) * (1.-dzi/dz);
double wi1j1k = pwei * vol_m1 * (dxi/dx) * (dyi/dy) * (1.-dzi/dz);
double wijk1 = pwei * vol_m1 * (1.-dxi/dx) * (1.-dyi/dy) * (dzi/dz);
double wi1jk1 = pwei * vol_m1 * (1.-dxi/dx) * (dyi/dy) * (dzi/dz);
double wij1k1 = pwei * vol_m1 * (dxi/dx) * (1.-dyi/dy) * (dzi/dz);
double wi1j1k1 = pwei * vol_m1 * (dxi/dx) * (dyi/dy) * (dzi/dz);
if (jx >= 0 && jx < nx - 1 && ix >= 0 && ix < ny - 1
&& kx >= 0 && kx < nz - 1)
{
atomicAdd(&grid1d[jx + ix*nx + kx*nx*ny], wijk);
atomicAdd(&grid1d[jx+1 + ix*nx + kx*nx*ny], wij1k);
atomicAdd(&grid1d[jx + (ix+1)*nx + kx*nx*ny], wi1jk);
atomicAdd(&grid1d[jx+1 + (ix+1)*nx + kx*nx*ny], wi1j1k);
atomicAdd(&grid1d[jx + ix*nx + (kx+1)*nx*ny], wijk1);
atomicAdd(&grid1d[jx+1 + ix*nx + (kx+1)*nx*ny], wij1k1);
atomicAdd(&grid1d[jx + (ix+1)*nx + (kx+1)*nx*ny], wi1jk1);
atomicAdd(&grid1d[jx+1 + (ix+1)*nx + (kx+1)*nx*ny], wi1j1k1);
}
}//end_vectorize
}
#endif
|
DRB081-func-arg-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
A function argument passed by value should be private inside the function.
Variable i is read only.
*/
#include<stdio.h>
#include<assert.h>
/* argument pass-by-value */
/* The argument is passed by value, so the increment only affects the
 * callee's private copy and is never visible to the caller. */
void f1(int q)
{
  q = q + 1;
}
int main()
{
  int i=0;
  /* i is shared across the team, but each thread only READS it when
     passing it by value to f1 — the increment inside f1 happens on a
     private copy, so there is no data race and i stays 0. */
#pragma omp parallel
  {
    f1(i);
  }
  assert (i==0);
  printf ("i=%d\n",i);
  return 0;
}
|
Booster.h | #ifndef _SQB_TREE_BOSTER_H
#define _SQB_TREE_BOSTER_H
#define myQDebug(...) printf (__VA_ARGS__)
#define myQFatal(...) do{ printf (__VA_ARGS__); exit(1); } while(0)
// This file is part of SQBlib, a gradient boosting/boosted trees implementation.
//
// Copyright (C) 2012 Carlos Becker, http://sites.google.com/site/carlosbecker
//
// SQBlib is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// SQBlib is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with SQBlib. If not, see <http://www.gnu.org/licenses/>.
#include <SQB/Core/RegTree.h>
#include <SQB/Core/Utils.h>
namespace SQB
{
    // Loss functions selectable via TreeBooster::setLoss():
    // exponential (AdaBoost-style), logistic, and squared error.
    enum LossType { ExpLoss, LogLoss, SquaredLoss };
}
#include <SQB/Core/LineSearch.h>
namespace SQB
{
// weak learner structure
// weak learner structure: one boosting round, i.e. a fitted regression
// tree together with its (shrunk, line-searched) step size alpha
template<typename TRegTreeType, typename TWeightsType>
struct TreeBoosterWL
{
    TRegTreeType tree;      // the fitted regression tree
    TWeightsType alpha;     // weighting coefficient
};
// Functor that accumulates one tree's alpha-weighted predictions into a
// shared score array. apply() may run concurrently from several OpenMP
// threads (see TreeBooster::predict), hence the atomic update.
template<typename ResponsePredType, typename AlphaType>
struct PredictOperator
{
    ResponsePredType &mScores;  // shared output: one accumulated score per sample
    AlphaType mAlpha;           // weight of the tree being evaluated

    PredictOperator( ResponsePredType &scores, AlphaType alpha ) :
        mScores(scores), mAlpha(alpha)
    {
    }

    // Adds alpha * predVal to the score at 'offset'; atomic because
    // multiple trees are evaluated in parallel over the same score array.
    template<typename ScalarType>
    inline void apply( const unsigned offset, const ScalarType predVal ) const
    {
        #pragma omp atomic
        mScores.coeffRef( offset ) += mAlpha * predVal;
    }
};
// this is fixed for precision issues
typedef double TreeBoosterWeightsType;
// base class for resampler
// base class for resampler: strategy interface used by TreeBooster::learn
// to subsample the training set before fitting each tree
template<typename TWeightsArrayType, typename TLabelsArrayType>
class TreeBoosterResamplerBase
{
public:
    typedef TWeightsArrayType WeightsArrayType;
    typedef TLabelsArrayType LabelsArrayType;

    // labelArray: labels (-1/1)
    // weightArray: weights
    // sampledIdxs: should be resized to the desired number, and indeces from 0 to labels.size() - 1 should be filled in
    // The resampler can modify the weights, but the new weightArray must have the same size as labelArray
    // and contain valid entries
    virtual void resample( const LabelsArrayType &labelArray, WeightsArrayType &weightArray, std::vector<unsigned int> *sampledIdxs ) const = 0;

    // if this class modifies the weight array (lets the caller know whether
    // weightArray may have changed after resample())
    virtual bool modifiesWeights() const = 0;
};
// Naive resampler: draws a fixed fraction of the training samples uniformly
// at random, without replacement. Sample weights are never modified.
template<typename WeightsArrayType, typename LabelsArrayType>
class TreeBoosterNaiveResampler : public TreeBoosterResamplerBase<WeightsArrayType, LabelsArrayType>
{
private:
    double mResamplingFactor;   // fraction of the data set to draw (default 0.5)

public:
    TreeBoosterNaiveResampler() : mResamplingFactor(0.5) {}

    void setResamplingFactor( double f ) { mResamplingFactor = f; }

    // Picks round(mResamplingFactor * N) indices, clamped to [1, N],
    // uniformly without replacement. weightArray is left untouched.
    virtual void resample( const LabelsArrayType &labelArray, WeightsArrayType &weightArray, std::vector<unsigned int> *sampledIdxs ) const
    {
        const unsigned numSamples = labelArray.size();

        double numToDraw = round(mResamplingFactor * numSamples);
        if (numToDraw < 1)          numToDraw = 1;
        if (numToDraw > numSamples) numToDraw = numSamples;

        sampleWithoutReplacement( (unsigned) numToDraw, numSamples, sampledIdxs );
    }

    // weights are never changed by this resampler
    virtual bool modifiesWeights() const { return false; }
};
template< typename TSampleListType, typename TFeatureListType, typename TFeatureValueObjectType, typename TClassifierResponseValueObjectType >
class TreeBooster
{
public:
#warning TODO: rename to WeightScalarType
typedef TreeBoosterWeightsType WeightsType; // type used for responses and weights
// used for response (class) and internally for weights/response of the regression trees built
typedef Eigen::Array<WeightsType, Eigen::Dynamic, 1> ResponseArrayType;
typedef Eigen::Array<WeightsType, Eigen::Dynamic, 1> WeightsArrayType;
public:
typedef TSampleListType SampleListType;
typedef TFeatureListType FeatureListType;
typedef TFeatureValueObjectType FeatureValueObjectType;
// the response to train a tree is from an Eigen Array
typedef MatrixResponseAndWeightsValueObject<ResponseArrayType> TreeResponseValueObjectType;
// response of the classifier (i.e. label)
typedef TClassifierResponseValueObjectType ClassifierResponseValueObjectType;
// type of trees to be grown
typedef RegTree<SampleListType, FeatureListType, FeatureValueObjectType, TreeResponseValueObjectType> RegTreeType;
// weights (and response) type derived from MatrixFeatureValueObjectType
//typedef typename MatrixFeatureValueObjectType::WeightScalarType WeightsType;
//#warning TODO: rename to FeatureScalarType
//typedef typename MatrixFeatureValueObjectType::FeatureScalarType ScalarType;
typedef TreeBoosterWL<RegTreeType, WeightsType> TreeBoosterWLType;
typedef TreeBoosterResamplerBase<WeightsArrayType, ResponseArrayType> ResamplerBaseObjectType;
private:
std::vector<TreeBoosterWLType> mWeakLearners;
LossType mLossType;
WeightsType mShrinkageFactor;
unsigned int mMaxTreeDepth;
// if alpha should be searched on the whole training set
// or only on the current resampled subset
bool mAlphaSearchOnSubset;
unsigned int mRandSeed;
// if it should ignore linesearch
bool mDisableLineSearch;
// resampler
const ResamplerBaseObjectType *mResamplerObj;
unsigned int mMTry;
bool mVerboseOutput;
public:
// Exponential (AdaBoost-style) loss: sum_i exp(-y_i * f(x_i)) * normFactor.
// normFactor rescales the per-sample terms to avoid numerical blow-up.
inline WeightsType expLoss( const ResponseArrayType &Y, const ResponseArrayType &curPred, WeightsType normFactor = 1.0 )
{
    return ((-Y * curPred).exp() * normFactor).sum();
}
// Logistic loss: sum_i log(1 + eps + exp(-2*y_i*f(x_i))) * normFactor.
// The epsilon term is presumably a numerical safeguard keeping the log
// argument strictly above 1 — TODO confirm the original intent.
inline WeightsType logLoss( const ResponseArrayType &Y, const ResponseArrayType &curPred, WeightsType normFactor = 1.0 )
{
    return ((1 + std::numeric_limits<double>::epsilon() + (- 2 * Y * curPred).exp()).log() * normFactor ).sum();
}
// Squared error loss: sum_i (y_i - f(x_i))^2, scaled by normFactor
// (applied once after the sum — mathematically equivalent to per-term).
inline WeightsType squaredLoss( const ResponseArrayType &Y, const ResponseArrayType &curPred, WeightsType normFactor = 1.0 )
{
    return ( ( Y - curPred ).square() ).sum() * normFactor;
}
TreeBooster()
{
mLossType = ExpLoss;
//mLossType = LogLoss;
mShrinkageFactor = 0.025;
mMaxTreeDepth = 2;
mAlphaSearchOnSubset = false;
mResamplerObj = 0;
mDisableLineSearch = false;
mMTry = 0;
mVerboseOutput = true;
}
// Enables/disables progress and diagnostic printing during learn().
void setVerboseOutput(bool yes)
{
    mVerboseOutput = yes;
}

// Seed for srand() at the start of learn() (drives subsampling / mtry).
void setRandSeed(unsigned int seed)
{
    mRandSeed = seed;
}

// Number of features to try per split (0 = use all; forwarded to tree.learn).
void setMTry(unsigned int mtry)
{
    mMTry = mtry;
}

// When disabled, learn() skips the line search and uses alpha = -1
// (pure negative-gradient step) before shrinkage.
void setDisableLineSearch(bool yes)
{
    mDisableLineSearch = yes;
}

// Selects the loss function (ExpLoss / LogLoss / SquaredLoss).
void setLoss( LossType L )
{
    mLossType = L;
}

// If true, the line search for alpha runs only on the resampled subset
// rather than on the whole training set.
void setAlphaSearchOnSubset( bool searchOnSubset )
{
    mAlphaSearchOnSubset = searchOnSubset;
}

// Learning rate multiplier applied to every line-searched alpha.
void setShrinkageFactor(WeightsType sf)
{
    mShrinkageFactor = sf;
}

// Optional resampling strategy (not owned; null = no subsampling).
void setResamplerObject( const ResamplerBaseObjectType *sampler )
{
    mResamplerObj = sampler;
}

// Maximum depth of each regression tree (weak learner).
void setMaxTreeDepth( unsigned td )
{
    mMaxTreeDepth = td;
}
// Human-readable name of a loss type, for log/summary output.
// Returns "INVALID" for unrecognized values.
static const char * getLossDescription( LossType L )
{
    if (L == LogLoss)
        return "Log Loss";
    if (L == ExpLoss)
        return "Exp Loss";
    if (L == SquaredLoss)
        return "Squared Loss";
    return "INVALID";
}
// Evaluates the ensemble on sampleIndexList, accumulating per-sample scores.
// maxIters > 0 limits evaluation to the first maxIters weak learners;
// normalize divides the scores by sum(|alpha|) over the evaluated learners.
template<typename ResponsePredType>
void predict( const SampleListType &sampleIndexList,
              const FeatureValueObjectType &valueObject,
              ResponsePredType &scores,
              int maxIters = -1,
              bool normalize = false) // if normalize => score is divided by sum(alphas)
{
    const unsigned N = sampleIndexList.size();
    scores.resize( N );
    scores.setZero();

    unsigned numWL = mWeakLearners.size();
    // NOTE(review): signed/unsigned comparison (int vs unsigned); safe here
    // because maxIters is checked to be positive first.
    if ( maxIters > 0 && maxIters <= numWL )
        numWL = maxIters;

    typedef PredictOperator<ResponsePredType, WeightsType> PredictOperatorType;

    // trees are evaluated in parallel; each one folds alpha * prediction
    // into the shared 'scores' via an atomic add (see PredictOperator::apply)
    #pragma omp parallel for
    for (unsigned i=0; i < numWL; i++)
    {
        mWeakLearners[i].tree.predictWithOperator( sampleIndexList, valueObject, PredictOperatorType( scores, mWeakLearners[i].alpha) );
    }

    if(normalize)
    {
        WeightsType sumAlphas = 0.0;
        for (unsigned i=0; i < numWL; i++)
            sumAlphas += fabs(mWeakLearners[i].alpha);
        // NOTE(review): divides even when sumAlphas == 0 (no weak learners
        // evaluated) — confirm callers never normalize an empty ensemble.
        scores /= sumAlphas;
    }
}
// only keeps the first 'max' stumps, removes the others
// Aborts (myQFatal) if asked to keep more learners than exist.
void cropWeakLearnersTo( unsigned max )
{
    if ( max > mWeakLearners.size() )
        myQFatal("Wanted to crop %d when there are only %d weak learners.", (int)max, (int) mWeakLearners.size() );

    // idiom fix: a single shrinking resize replaces the pop_back loop
    // (identical effect: destroys the trailing learners, keeps the first 'max')
    mWeakLearners.resize( max );
}
// Trains 'maxIter' rounds of gradient boosting. Each round:
//  1. computes per-sample responses R and weights W from the loss gradient,
//  2. optionally resamples the training set,
//  3. fits a weighted regression tree to (R, W),
//  4. line-searches the step size alpha (unless disabled), applies shrinkage,
//  5. accumulates alpha * tree prediction into the running prediction.
// Progress (loss, alpha, timings) is printed; see setVerboseOutput().
void learn( const SampleListType &sampleIndexList,
            const FeatureListType &featIndexList,
            const FeatureValueObjectType &featValueObj,
            const ClassifierResponseValueObjectType &classValueObject,
            unsigned maxIter)
{
    srand(mRandSeed);
#ifdef MEX
    // timer to call drawnow
    TimerRT drawnowTimer;
#endif
    printf("\nShrinkage factor: %f\n", mShrinkageFactor);
    printf("Loss type: %s\n\n", getLossDescription(mLossType));

    const unsigned N = sampleIndexList.size();

    WeightsArrayType curPred(N);    // running ensemble prediction f(x)
    curPred.setZero();

    // used to normalize the predictions to avoid numerical issues
    WeightsType curLoss = 1.0;

    WeightsArrayType expYF(N); // exp(-y*f)
    WeightsArrayType W(N); // fitting weights
    WeightsArrayType R(N); // fitting responses
    WeightsArrayType score(N);

    // read classes in vector, this won't change and we will need it frequently
    WeightsArrayType Y(N);
#warning tobefixed should be sampleIndexList[i] instead of i if it was really a proper implementation
    for (unsigned i=0; i < N; i++)
        Y.coeffRef(i) = classValueObject[ i ];

    for (unsigned iter=0; iter < maxIter; iter++)
    {
        TimerRT theTimer;
        WeightsType normFactor = 1.0 / curLoss;

        // per-loss gradient step: fill W (weights) and R (pseudo-responses)
        switch( mLossType )
        {
            case ExpLoss:
                // epsilon keeps W strictly positive for the division below
                expYF = ((-Y * curPred).exp() * normFactor) + std::numeric_limits<double>::epsilon();
                W = expYF;
                R = (-Y * expYF) / W; // TODO: there is a simplification that can be done here!
                break;
            case LogLoss:
                W.setConstant(1.0);
                expYF = (- 2 * Y * curPred).exp();
                R = (normFactor * Y * expYF) / ( 1 + expYF);
                break;
            case SquaredLoss:
                W.setConstant(2);
                R = ( Y - curPred ); // sign already inverted to go in gradient descent dir
                break;
        }
#if SHOW_TIMINGS
        std::cout << "Loss part: " << theTimer.elapsed() << std::endl;
#endif
#if 0
        if (mVerboseOutput)
        {
            std::cout << "R0: " << R.coeff(0) << std::endl;
            std::cout << "W0: " << W.coeff(0) << std::endl;
            std::cout << "Max/Min R: " << R.maxCoeff() << " / " << R.minCoeff() << std::endl;
            std::cout << "Max/Min W: " << W.maxCoeff() << " / " << W.minCoeff() << std::endl;
        }
#endif
        // sanity check: abort hard on NaN responses/weights
        for (unsigned i=0; i < N; i++)
            if (std::isnan(R.coeff(i)) || std::isnan(W.coeff(i))) {
                std::cout << "NaN at ExpYF = " << expYF.coeff(i) << std::endl;
                myQFatal("error");
            }

        // fit regression tree
#if SHOW_TIMINGS
        theTimer.Reset();
#endif
        // subsampling, if set
        std::vector<unsigned> subSampIdxs;
        if (mResamplerObj != 0)
            mResamplerObj->resample( Y, W, &subSampIdxs );

        // re-scale W to avoid numerical problems
        W /= W.maxCoeff();
        // R /= R.abs().maxCoeff();

        TreeResponseValueObjectType treeValueObj(R, W);

        // but before learning, feature callback
        featIndexList.beforeLearnIterCallback();

        mWeakLearners.push_back( TreeBoosterWLType() );
        mWeakLearners.back().tree.learn( sampleIndexList, featIndexList, featValueObj, treeValueObj, mMaxTreeDepth, (mResamplerObj == 0) ? 0 : &subSampIdxs, mMTry );

        // predict values
#if SHOW_TIMINGS
        std::cout << "Tree part: " << theTimer.elapsed() << std::endl;
        theTimer.Reset();
#endif
        mWeakLearners.back().tree.predict( sampleIndexList, featValueObj, score );
#if SHOW_TIMINGS
        std::cout << "Pred part: " << theTimer.elapsed() << std::endl;
#endif
        if(mVerboseOutput)
            myQDebug("Max/Min score: %f / %f\n", score.maxCoeff(), score.minCoeff());

        // line search
#if SHOW_TIMINGS
        theTimer.Reset();
#endif
        double alpha = 1.0;
        if (!mDisableLineSearch)
        {
            LineSearch<ResponseArrayType, ResponseArrayType> LS( curPred, score, Y, mLossType, mAlphaSearchOnSubset ? (&subSampIdxs) : 0 );
            alpha = LS.run();
        } else
            alpha = -1.0; // because we want the -gradient direction
#if SHOW_TIMINGS
        std::cout << "LS part: " << theTimer.elapsed() << std::endl;
#endif
        // keep alpha bounded away from zero so the new learner always contributes
        if (fabs(alpha) < 1e-9)
            alpha = -1e-9;

        // shrinkage
        alpha = mShrinkageFactor * alpha;

        if(mVerboseOutput)
            myQDebug("Alpha %f\n", alpha);

        mWeakLearners.back().alpha = alpha;

        // update prediction score
        curPred += alpha * score;

        // show new loss (also feeds normFactor of the next iteration)
        switch(mLossType)
        {
            case ExpLoss:
                curLoss = expLoss( Y, curPred );
                break;
            case LogLoss:
                curLoss = logLoss( Y, curPred );
                break;
            case SquaredLoss:
                curLoss = squaredLoss( Y, curPred ) / N;
                break;
        }

        myQDebug("Iter %d / New loss: %f\n", iter, curLoss);

        // compute misclassif error (sign disagreement between prediction and label)
        double miscErr = 0;
        for (unsigned i=0; i < N; i++)
            if ( curPred.coeff(i) * Y.coeff(i) < 0 )
                miscErr++;
        miscErr /= N;

        if(mVerboseOutput)
        {
            // myQDebug("Misclassif. error: %f\n", miscErr);
            if (TimerRT::implemented())
                myQDebug("---> Total WL took: %f\n", theTimer.elapsed() );
        }
#ifdef MEX
        // keep the MATLAB UI responsive during long trainings
        if (drawnowTimer.elapsed() > 2.00)
        {
            mexEvalString("drawnow");
            drawnowTimer.Reset();
        }
#endif
        /*if (fabs(alpha) < 1e-9)
        {
            std::cout << "Breaking at iter " << iter << ", cannot go on!" << std::endl;
            break;
        }*/
    }
}
public:
/// @return the number of weak learners (boosting rounds) trained so far.
unsigned numWeakLearners() const
{
    return static_cast<unsigned>( mWeakLearners.size() );
}
/// Prints a human-readable summary of the booster configuration to stdout:
/// loss type, shrinkage, tree depth, mtry and line-search setting.
void printOptionsSummary() const
{
    const char *lossDesc = getLossDescription(mLossType);

    printf("\nTreeboster options:\n");
    printf("\tLoss: %s\n", lossDesc);
    printf("\tShrinkage factor: %f\n", mShrinkageFactor);
    printf("\tMax tree depth: %d\n", (int)mMaxTreeDepth);

    // mMTry == 0 means "use all features" (mtry not configured)
    if (mMTry == 0)
        printf("\tMTry: not set\n");
    else
        printf("\tMTry: %d\n", mMTry);

    printf("\tDisable line search: %s\n", mDisableLineSearch ? "true" : "false");
    printf("\n");
}
#ifdef LIBCONFIGXX_VER_REVISION
// s must be a libconfig::Setting::TypeList
// Serializes the ensemble into `base` (must be a libconfig TypeList):
// one group per weak learner, each holding a "tree" list and an "alpha" float.
void saveToLibconfig(libconfig::Setting &base) const
{
    // save each weaklearner
    for (unsigned i=0; i < mWeakLearners.size(); i++)
    {
        // a group per learner, created inside the caller-supplied list
        libconfig::Setting &group = base.add(libconfig::Setting::TypeGroup);
        libconfig::Setting &tree = group.add("tree", libconfig::Setting::TypeList);
        libconfig::Setting &alpha = group.add("alpha", libconfig::Setting::TypeFloat);
        // the tree serializes itself into the freshly created "tree" list
        mWeakLearners[i].tree.saveToLibconfig( tree );
        alpha = mWeakLearners[i].alpha;
    }
}
// s must be a libconfig::Setting::TypeList
// Rebuilds the ensemble from `base` (a libconfig TypeList written by
// saveToLibconfig): any previously held weak learners are discarded.
void loadFromLibconfig( const libconfig::Setting &base )
{
    const unsigned count = base.getLength();

    mWeakLearners.clear();
    mWeakLearners.resize( count );

    for (unsigned idx = 0; idx < count; idx++)
    {
        const libconfig::Setting &wlGroup = base[idx];
        mWeakLearners[idx].tree.loadFromLibconfig( wlGroup["tree"] );
        mWeakLearners[idx].alpha = (double) wlGroup["alpha"];
    }
}
#endif
public:
#ifdef MEX
// Serializes the ensemble to a 1-by-N Matlab struct array with fields
// "alpha" (scalar of WeightsType) and "tree" (tree-specific representation).
// Caller owns the returned mxArray.
mxArray *saveToMatlab() const
{
    const char *fieldNames[] = { "alpha", "tree" };
    mxArray *ret = mxCreateStructMatrix( 1, mWeakLearners.size(), 2, fieldNames );
    for (unsigned i=0; i < mWeakLearners.size(); i++)
    {
        // the tree builds its own mxArray; ownership passes to the struct
        mxSetField( ret, i, "tree", mWeakLearners[i].tree.saveToMatlab());
        {
            // alpha stored as a 1x1 numeric matrix matching WeightsType
            mxArray *alpha = mxCreateNumericMatrix(1,1, matlabClassID<WeightsType>(), mxREAL);
            ((WeightsType *)mxGetData(alpha))[0] = mWeakLearners[i].alpha;
            mxSetField( ret, i, "alpha", alpha);
        }
    }
    return ret;
}
bool loadFromMatlab( const mxArray *data )
{
const unsigned numEl = mxGetNumberOfElements( data );
if (numEl == 0)
{
printf("Number of elements in cell array cannot be zero");
return false;
}
mWeakLearners.clear();
mWeakLearners.resize( numEl );
// NO ERROR CHECKING HERE, BEWARE!
for (unsigned i=0; i < numEl; i++)
{
mWeakLearners[i].alpha = ((const WeightsType *)mxGetData( mxGetField( data, i, "alpha" ) ))[0];
if (!mWeakLearners[i].tree.loadFromMatlab( mxGetField( data, i, "tree" ) ))
return false;
}
return true;
}
#endif
};
}
#endif //_SQB_TREE_BOSTER_H
|
core_ctrmm.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_ztrmm.c, normal z -> c, Fri Sep 28 17:38:23 2018
*
**/
#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "core_lapack.h"
/***************************************************************************//**
*
* @ingroup core_trmm
*
* Performs a triangular matrix-matrix multiply of the form
*
* \f[B = \alpha [op(A) \times B] \f], if side = PlasmaLeft or
* \f[B = \alpha [B \times op(A)] \f], if side = PlasmaRight
*
* where op( X ) is one of:
*
* - op(A) = A or
* - op(A) = A^T or
* - op(A) = A^H
*
* alpha is a scalar, B is an m-by-n matrix and A is a unit or non-unit, upper
* or lower triangular matrix.
*
*******************************************************************************
*
* @param[in] side
* Specifies whether op( A ) appears on the left or on the right of B:
* - PlasmaLeft: alpha*op( A )*B
* - PlasmaRight: alpha*B*op( A )
*
* @param[in] uplo
* Specifies whether the matrix A is upper triangular or lower
* triangular:
* - PlasmaUpper: Upper triangle of A is stored;
* - PlasmaLower: Lower triangle of A is stored.
*
* @param[in] transa
* Specifies whether the matrix A is transposed, not transposed or
* conjugate transposed:
 * - PlasmaNoTrans: A is not transposed;
 * - PlasmaTrans: A is transposed;
* - PlasmaConjTrans: A is conjugate transposed.
*
* @param[in] diag
* Specifies whether or not A is unit triangular:
* - PlasmaNonUnit: A is non-unit triangular;
* - PlasmaUnit: A is unit triangular.
*
* @param[in] m
* The number of rows of matrix B.
* m >= 0.
*
* @param[in] n
* The number of columns of matrix B.
* n >= 0.
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] A
* The triangular matrix A of dimension lda-by-k, where k is m when
 * side='L' or 'l' and k is n when side='R' or 'r'. If uplo =
* PlasmaUpper, the leading k-by-k upper triangular part of the array
* A contains the upper triangular matrix, and the strictly lower
* triangular part of A is not referenced. If uplo = PlasmaLower, the
* leading k-by-k lower triangular part of the array A contains the
* lower triangular matrix, and the strictly upper triangular part of
* A is not referenced. If diag = PlasmaUnit, the diagonal elements of
* A are also not referenced and are assumed to be 1.
*
* @param[in] lda
* The leading dimension of the array A. When side='L' or 'l',
* lda >= max(1,m), when side='R' or 'r' then lda >= max(1,n).
*
* @param[in,out] B
* On entry, the matrix B of dimension ldb-by-n.
* On exit, the result of a triangular matrix-matrix multiply
* ( alpha*op(A)*B ) or ( alpha*B*op(A) ).
*
* @param[in] ldb
* The leading dimension of the array B. ldb >= max(1,m).
*
******************************************************************************/
// Thin sequential kernel: forwards directly to CBLAS with column-major
// layout. Declared weak so an optimized/vendor implementation can override
// it at link time. alpha is passed by address (CBLAS_SADDR) as required by
// the complex-valued CBLAS interface.
__attribute__((weak))
void plasma_core_ctrmm(
    plasma_enum_t side, plasma_enum_t uplo,
    plasma_enum_t transa, plasma_enum_t diag,
    int m, int n,
    plasma_complex32_t alpha, const plasma_complex32_t *A, int lda,
    plasma_complex32_t *B, int ldb)
{
    cblas_ctrmm(
        CblasColMajor,
        (CBLAS_SIDE)side, (CBLAS_UPLO)uplo,
        (CBLAS_TRANSPOSE)transa, (CBLAS_DIAG)diag,
        m, n,
        CBLAS_SADDR(alpha), A, lda,
        B, ldb);
}
/******************************************************************************/
// OpenMP-task wrapper around plasma_core_ctrmm(). A spans k columns where
// k = m for PlasmaLeft and k = n for PlasmaRight, hence the in-dependence
// on lda*k elements; B is read and written in place (inout on ldb*n).
// The body is skipped once a prior task in `sequence` has recorded an
// error, so failures propagate without executing further work.
void plasma_core_omp_ctrmm(
    plasma_enum_t side, plasma_enum_t uplo,
    plasma_enum_t transa, plasma_enum_t diag,
    int m, int n,
    plasma_complex32_t alpha, const plasma_complex32_t *A, int lda,
    plasma_complex32_t *B, int ldb,
    plasma_sequence_t *sequence, plasma_request_t *request)
{
    int k = (side == PlasmaLeft) ? m : n;

    #pragma omp task depend(in:A[0:lda*k]) \
                     depend(inout:B[0:ldb*n])
    {
        if (sequence->status == PlasmaSuccess)
            plasma_core_ctrmm(side, uplo,
                              transa, diag,
                              m, n,
                              alpha, A, lda,
                              B, ldb);
    }
}
|
superlu_solver.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Janosch Stascheit
//
#if !defined(KRATOS_SUPERLU_SOLVER_H_INCLUDED )
#define KRATOS_SUPERLU_SOLVER_H_INCLUDED
// External includes
#include "includes/ublas_interface.h"
// #include "external_includes/superlu/superlu.hpp"
#include "SRC/slu_ddefs.h"
// Project includes
#include "includes/define.h"
#include "linear_solvers/direct_solver.h"
namespace ublas = boost::numeric::ublas;
namespace Kratos
{
template< class TSparseSpaceType, class TDenseSpaceType,
          class TReordererType = Reorderer<TSparseSpaceType, TDenseSpaceType> >
class SuperLUSolver : public DirectSolver< TSparseSpaceType,
    TDenseSpaceType, TReordererType>
{
public:

    /**
     * Counted pointer of SuperLUSolver
     */
    KRATOS_CLASS_POINTER_DEFINITION( SuperLUSolver );

    typedef DirectSolver<TSparseSpaceType, TDenseSpaceType, TReordererType> BaseType;

    typedef typename TSparseSpaceType::MatrixType SparseMatrixType;

    typedef typename TSparseSpaceType::VectorType VectorType;

    typedef typename TDenseSpaceType::MatrixType DenseMatrixType;

    /**
     * Default constructor
     */
    SuperLUSolver() {}

    /**
     * Constructor taking a Parameters object with the solver settings
     */
    SuperLUSolver(Parameters settings): BaseType(settings) {}

    /**
     * Destructor
     */
    ~SuperLUSolver() override {}

    /**
     * Normal solve method.
     * Solves the linear system Ax=b with the SuperLU sequential direct
     * solver (dgssv) and puts the result in rX. rB is restored to its
     * original content before returning.
     * @param rA System matrix in ublas CSR format.
     * @param rX Solution vector.
     * @param rB Right hand side vector.
     * @return true on success; false on inconsistent sizes, allocation
     *         failure, or a failed (e.g. singular) factorization.
     */
    bool Solve(SparseMatrixType& rA, VectorType& rX, VectorType& rB) override
    {
        // dgssv overwrites its RHS with the solution, so keep a copy to
        // restore rB before returning
        VectorType rC = rB;

        if (this->IsNotConsistent(rA, rX, rB))
            return false;

        superlu_options_t options;
        SuperLUStat_t stat;

        set_default_options(&options);
        options.IterRefine = SLU_DOUBLE; // double-precision iterative refinement
        // options.ColPerm = MMD_AT_PLUS_A;

        // SuperLU expects plain int index arrays while ublas stores unsigned
        // integers internally, so the row-pointer and column-index arrays
        // must be copied; the value array is shared with rA (no copy)
        SuperMatrix Aslu, B, L, U;
        int *index1_vector = new (std::nothrow) int[rA.index1_data().size()];
        int *index2_vector = new (std::nothrow) int[rA.index2_data().size()];
        if (index1_vector == nullptr || index2_vector == nullptr)
        {
            delete [] index1_vector;
            delete [] index2_vector;
            return false;
        }
        for (unsigned int i = 0; i < rA.index1_data().size(); i++)
            index1_vector[i] = (int)rA.index1_data()[i];
        for (unsigned int i = 0; i < rA.index2_data().size(); i++)
            index2_vector[i] = (int)rA.index2_data()[i];

        dCreate_CompRow_Matrix (&Aslu, rA.size1(), rA.size2(),
                                rA.nnz(),
                                rA.value_data().begin(),
                                index2_vector,
                                index1_vector,
                                SLU_NR, SLU_D, SLU_GE
                               );

        // wrap the RHS as a dense n-by-1 matrix sharing rB's memory
        dCreate_Dense_Matrix (&B, rB.size(), 1, &rB[0], rB.size(), SLU_DN, SLU_D, SLU_GE);

        // permutation arrays: dgssv requires perm_c sized by the number of
        // columns and perm_r by the number of rows (previously swapped,
        // which was only harmless for square matrices)
        int* perm_c;
        int* perm_r;
        if ( !(perm_c = intMalloc(rA.size2())) ) ABORT("Malloc fails for perm_c[].");
        if ( !(perm_r = intMalloc(rA.size1())) ) ABORT("Malloc fails for perm_r[].");

        // initialize container for statistical data
        StatInit(&stat);

        // factorize and solve; info != 0 signals a zero pivot (singular
        // matrix) or a memory allocation failure inside SuperLU
        int info;
        dgssv(&options, &Aslu, perm_c, perm_r, &L, &U, &B, &stat, &info);

        if (options.PrintStat) {
            StatPrint(&stat);
        }

        // B shares rB's memory, so rB now holds the solution: copy it to rX
        #pragma omp parallel for
        for (int i = 0; i < static_cast<int>(rB.size()); i++)
            rX[i] = rB[i];

        // recover the original RHS
        rB = rC;

        // deallocate: the "Store" destructors release only the SuperLU
        // wrappers (the wrapped data is owned by rA/rB/this function),
        // while L and U are owned by SuperLU and freed in full
        StatFree(&stat);
        SUPERLU_FREE (perm_r);
        SUPERLU_FREE (perm_c);
        Destroy_SuperMatrix_Store(&Aslu);
        Destroy_SuperMatrix_Store(&B);
        Destroy_SuperNode_Matrix(&L);
        Destroy_CompCol_Matrix(&U);
        delete [] index1_vector;
        delete [] index2_vector;

        return info == 0;
    }

    /**
     * Multi solve method for solving a set of linear systems with same
     * coefficient matrix.
     * Solves the linear system Ax=b and puts the result on SystemVector& rX.
     * rX is also the initial guess for iterative methods.
     * @note Not implemented: rX is left untouched and the method always
     *       reports success.
     * @param rA System matrix
     * @param rX Solution matrix (one column per right hand side)
     * @param rB Right hand side matrix (one column per right hand side)
     */
    bool Solve(SparseMatrixType& rA, DenseMatrixType& rX, DenseMatrixType& rB) override
    {
        // TODO: wrap rB as an n-by-m dense SuperMatrix and call dgssv once,
        // or solve column by column reusing the factorization
        bool is_solved = true;
        return is_solved;
    }

    /**
     * Print information about this object.
     */
    void PrintInfo(std::ostream& rOStream) const override
    {
        rOStream << "SuperLU solver finished.";
    }

    /**
     * Print object's data (nothing to report for this solver).
     */
    void PrintData(std::ostream& rOStream) const override
    {
    }

private:

    /**
     * Assignment operator (declared, not defined: solver is non-copyable).
     */
    SuperLUSolver& operator=(const SuperLUSolver& Other);

    /**
     * Copy constructor (declared, not defined: solver is non-copyable).
     */
    SuperLUSolver(const SuperLUSolver& Other);

}; // Class SuperLUSolver
/**
* input stream function
*/
// Input stream operator: deserialization is not supported, the stream is
// returned untouched (standard Kratos boilerplate).
template<class TSparseSpaceType, class TDenseSpaceType,class TReordererType>
inline std::istream& operator >> (std::istream& rIStream, SuperLUSolver< TSparseSpaceType,
                                  TDenseSpaceType, TReordererType>& rThis)
{
    return rIStream;
}
/**
* output stream function
*/
// Output stream operator following the standard Kratos printing pattern:
// info line, newline, then object data.
template<class TSparseSpaceType, class TDenseSpaceType, class TReordererType>
inline std::ostream& operator << (std::ostream& rOStream,
                                  const SuperLUSolver<TSparseSpaceType,
                                  TDenseSpaceType, TReordererType>& rThis)
{
    rThis.PrintInfo(rOStream);
    rOStream << std::endl;
    rThis.PrintData(rOStream);
    return rOStream;
}
} // namespace Kratos.
#endif // KRATOS_SUPERLU_SOLVER_H_INCLUDED defined
|
rar_common.c | /*
* This software is Copyright (c) 2011, Dhiru Kholia <dhiru.kholia at gmail.com>
* and Copyright (c) 2012, magnum
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#include "misc.h" // error()
static int omp_t = 1;
static unsigned char *saved_salt;
static unsigned char *saved_key;
static int (*cracked);
static unpack_data_t (*unpack_data);
static unsigned int *saved_len;
static unsigned char *aes_key;
static unsigned char *aes_iv;
#define FORMAT_TAG "$RAR3$*"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
/* cRARk use 4-char passwords for CPU benchmark */
static struct fmt_tests cpu_tests[] = {
{"$RAR3$*0*b109105f5fe0b899*d4f96690b1a8fe1f120b0290a85a2121", "test"},
{"$RAR3$*0*42ff7e92f24fb2f8*9d8516c8c847f1b941a0feef064aaf0d", "1234"},
{"$RAR3$*0*56ce6de6ddee17fb*4c957e533e00b0e18dfad6accc490ad9", "john"},
/* -p mode tests, -m0 and -m3 (in that order) */
{"$RAR3$*1*c47c5bef0bbd1e98*965f1453*48*47*1*c5e987f81d316d9dcfdb6a1b27105ce63fca2c594da5aa2f6fdf2f65f50f0d66314f8a09da875ae19d6c15636b65c815*30", "test"},
{"$RAR3$*1*b4eee1a48dc95d12*965f1453*64*47*1*0fe529478798c0960dd88a38a05451f9559e15f0cf20b4cac58260b0e5b56699d5871bdcc35bee099cc131eb35b9a116adaedf5ecc26b1c09cadf5185b3092e6*33", "test"},
#ifdef DEBUG
/* Various lengths, these should be in self-test but not benchmark */
/* from CMIYC 2012 */
{"$RAR3$*1*0f263dd52eead558*834015cd*384*693*1*e28e9648f51b59e32f573b302f0e94aadf1050678b90c38dd4e750c7dd281d439ab4cccec5f1bd1ac40b6a1ead60c75625666307171e0fe2639d2397d5f68b97a2a1f733289eac0038b52ec6c3593ff07298fce09118c255b2747a02c2fa3175ab81166ebff2f1f104b9f6284a66f598764bd01f093562b5eeb9471d977bf3d33901acfd9643afe460e1d10b90e0e9bc8b77dc9ac40d40c2d211df9b0ecbcaea72c9d8f15859d59b3c85149b5bb5f56f0218cbbd9f28790777c39e3e499bc207289727afb2b2e02541b726e9ac028f4f05a4d7930efbff97d1ffd786c4a195bbed74997469802159f3b0ae05b703238da264087b6c2729d9023f67c42c5cbe40b6c67eebbfc4658dfb99bfcb523f62133113735e862c1430adf59c837305446e8e34fac00620b99f574fabeb2cd34dc72752014cbf4bd64d35f17cef6d40747c81b12d8c0cd4472089889a53f4d810b212fb314bf58c3dd36796de0feeefaf26be20c6a2fd00517152c58d0b1a95775ef6a1374c608f55f416b78b8c81761f1d*33:1::to-submit-challenges.txt", "wachtwoord"},
{"$RAR3$*1*9759543e04fe3a22*834015cd*384*693*1*cdd2e2478e5153a581c47a201490f5d9b69e01584ae488a2a40203da9ba8c5271ed8edc8f91a7bd262bb5e5de07ecbe9e2003d054a314d16caf2ea1de9f54303abdee1ed044396f7e29c40c38e638f626442efd9f511b4743758cd4a6025c5af81d1252475964937d80bfd50d10c171e7e4041a66c02a74b2b451ae83b6807990fb0652a8cdab530c5a0c497575a6e6cbe2db2035217fe849d2e0b8693b70f3f97b757229b4e89c8273197602c23cc04ff5f24abf3d3c7eb686fc3eddce1bfe710cc0b6e8bd012928127da38c38dd8f056095982afacb4578f6280d51c6739739e033674a9413ca88053f8264c5137d4ac018125c041a3489daaf175ef75e9282d245b92948c1bbcf1c5f25b7028f6d207d87fe9598c2c7ccd1553e842a91ab8ca9261a51b14601a756070388d08039466dfa36f0b4c7ea7dd9ff25c9d98687203c58f9ec8757cafe4d2ed785d5a9e6d5ea838e4cc246a9e6d3c30979dcce56b380b05f9103e6443b35357550b50229c47f845a93a48602790096828d9d6bef0*33:1::to-submit-challenges.txt", "Sleepingbaby210"},
{"$RAR3$*1*79e17c26407a7d52*834015cd*384*693*1*6844a189e732e9390b5a958b623589d5423fa432d756fd00940ac31e245214983507a035d4e0ee09469491551759a66c12150fe6c5d05f334fb0d8302a96d48ef4da04954222e0705507aaa84f8b137f284dbec344eee9cea6b2c4f63540c64df3ee8be3013466d238c5999e9a98eb6375ec5462869bba43401ec95077d0c593352339902c24a3324178e08fe694d11bfec646c652ffeafbdda929052c370ffd89168c83194fedf7c50fc7d9a1fbe64332063d267a181eb07b5d70a5854067db9b66c12703fde62728d3680cf3fdb9933a0f02bfc94f3a682ad5e7c428d7ed44d5ff554a8a445dea28b81e3a2631870e17f3f3c0c0204136802c0701590cc3e4c0ccd9f15e8be245ce9caa6969fab9e8443ac9ad9e73e7446811aee971808350c38c16c0d3372c7f44174666d770e3dd321e8b08fb2dc5e8a6a5b2a1720bad66e54abc194faabc5f24225dd8fee137ba5d4c2ed48c6462618e6333300a5b8dfc75c65608925e786eb0988f7b3a5ab106a55168d1001adc47ce95bba77b38c35b*33:1::to-submit-challenges.txt", "P-i-r-A-T-E"},
{"$RAR3$*1*e1df79fd9ee1dadf*771a163b*64*39*1*edc483d67b94ab22a0a9b8375a461e06fa1108fa72970e16d962092c311970d26eb92a033a42f53027bdc0bb47231a12ed968c8d530a9486a90cbbc00040569b*33", "333"},
{"$RAR3$*1*c83c00534d4af2db*771a163b*64*39*1*05244526d6b32cb9c524a15c79d19bba685f7fc3007a9171c65fc826481f2dce70be6148f2c3497f0d549aa4e864f73d4e4f697fdb66ff528ed1503d9712a414*33", "11eleven111"},
{"$RAR3$*0*c203c4d80a8a09dc*49bbecccc08b5d893f308bce7ad36c0f", "sator"},
{"$RAR3$*0*672fca155cb74ac3*8d534cd5f47a58f6493012cf76d2a68b", "arepo"},
{"$RAR3$*0*c203c4d80a8a09dc*c3055efe7ca6587127fd541a5b88e0e4", "tenet"},
{"$RAR3$*0*672fca155cb74ac3*c760267628f94060cca57be5896003c8", "opera"},
{"$RAR3$*0*c203c4d80a8a09dc*1f406154556d4c895a8be207fd2b5d0c", "rotas"},
{"$RAR3$*0*345f5f573a077ad7*638e388817cc7851e313406fd77730b9", "Boustrophedon"},
{"$RAR3$*0*c9dea41b149b53b4*fcbdb66122d8ebdb32532c22ca7ab9ec", "password"},
{"$RAR3$*0*7ce241baa2bd521b*f2b26d76424efa351c728b321671d074", "@"},
{"$RAR3$*0*ea0ea55ce549c8ab*cf89099c620fcc244bdcbae55a616e76", "ow"},
{"$RAR3$*0*ea0ea55ce549c8ab*6a35a76b1ce9ddc4229b9166d60dc113", "aes"},
{"$RAR3$*0*ea0ea55ce549c8ab*1830771da109f53e2d6e626be16c2666", "sha1"},
{"$RAR3$*0*7e52d3eba9bad316*ee8e1edd435cfa9b8ab861d958a4d588", "fiver"},
{"$RAR3$*0*7e52d3eba9bad316*01987735ab0be7b6538470bd5f5fbf80", "magnum"},
{"$RAR3$*0*7e52d3eba9bad316*f2fe986ed266c6617c48d04a429cf2e3", "7777777"},
{"$RAR3$*0*7e52d3eba9bad316*f0ad6e7fdff9f82fff2aa990105fde21", "password"},
{"$RAR3$*0*7ce241baa2bd521b*3eb0017fa8843017952c53a3ac8332b6", "nine9nine"},
{"$RAR3$*0*7ce241baa2bd521b*ccbf0c3f8e059274606f33cc388b8a2f", "10tenten10"},
{"$RAR3$*0*5fa43f823a60da63*af2630863e12046e42c4501c915636c9", "eleven11111"},
{"$RAR3$*0*5fa43f823a60da63*88c0840d0bd98844173d35f867558ec2", "twelve121212"},
{"$RAR3$*0*4768100a172fa2b6*48edcb5283ee2e4f0e8edb25d0d85eaa", "subconsciousness"},
#endif
{NULL}
};
#ifdef RAR_OPENCL_FORMAT
/* cRARk use 5-char passwords for GPU benchmark */
static struct fmt_tests gpu_tests[] = {
{"$RAR3$*0*c203c4d80a8a09dc*49bbecccc08b5d893f308bce7ad36c0f", "sator"},
{"$RAR3$*0*672fca155cb74ac3*8d534cd5f47a58f6493012cf76d2a68b", "arepo"},
{"$RAR3$*0*c203c4d80a8a09dc*c3055efe7ca6587127fd541a5b88e0e4", "tenet"},
{"$RAR3$*0*672fca155cb74ac3*c760267628f94060cca57be5896003c8", "opera"},
{"$RAR3$*0*c203c4d80a8a09dc*1f406154556d4c895a8be207fd2b5d0c", "rotas"},
/* -p mode tests, -m0 and -m3 (in that order) */
{"$RAR3$*1*c47c5bef0bbd1e98*965f1453*48*47*1*c5e987f81d316d9dcfdb6a1b27105ce63fca2c594da5aa2f6fdf2f65f50f0d66314f8a09da875ae19d6c15636b65c815*30", "test"},
{"$RAR3$*1*b4eee1a48dc95d12*965f1453*64*47*1*0fe529478798c0960dd88a38a05451f9559e15f0cf20b4cac58260b0e5b56699d5871bdcc35bee099cc131eb35b9a116adaedf5ecc26b1c09cadf5185b3092e6*33", "test"},
#ifdef DEBUG
{"$RAR3$*0*af24c0c95e9cafc7*e7f207f30dec96a5ad6f917a69d0209e", "magnum"},
{"$RAR3$*0*2653b9204daa2a8e*39b11a475f486206e2ec6070698d9bbc", "123456"},
{"$RAR3$*0*63f1649f16c2b687*8a89f6453297bcdb66bd756fa10ddd98", "abc123"},
/* -p mode tests, -m0 and -m3 (in that order) */
{"$RAR3$*1*575b083d78672e85*965f1453*48*47*1*cd3d8756438f43ab70e668792e28053f0ad7449af1c66863e3e55332bfa304b2c082b9f23b36cd4a8ebc0b743618c5b2*30", "magnum"},
{"$RAR3$*1*6f5954680c87535a*965f1453*64*47*1*c9bb398b9a5d54f035fd22be54bc6dc75822f55833f30eb4fb8cc0b8218e41e6d01824e3467475b90b994a5ddb7fe19366d293c9ee305316c2a60c3a7eb3ce5a*33", "magnum"},
/* Various lengths, these should be in self-test but not benchmark */
/* from CMIYC 2012 */
{"$RAR3$*1*0f263dd52eead558*834015cd*384*693*1*e28e9648f51b59e32f573b302f0e94aadf1050678b90c38dd4e750c7dd281d439ab4cccec5f1bd1ac40b6a1ead60c75625666307171e0fe2639d2397d5f68b97a2a1f733289eac0038b52ec6c3593ff07298fce09118c255b2747a02c2fa3175ab81166ebff2f1f104b9f6284a66f598764bd01f093562b5eeb9471d977bf3d33901acfd9643afe460e1d10b90e0e9bc8b77dc9ac40d40c2d211df9b0ecbcaea72c9d8f15859d59b3c85149b5bb5f56f0218cbbd9f28790777c39e3e499bc207289727afb2b2e02541b726e9ac028f4f05a4d7930efbff97d1ffd786c4a195bbed74997469802159f3b0ae05b703238da264087b6c2729d9023f67c42c5cbe40b6c67eebbfc4658dfb99bfcb523f62133113735e862c1430adf59c837305446e8e34fac00620b99f574fabeb2cd34dc72752014cbf4bd64d35f17cef6d40747c81b12d8c0cd4472089889a53f4d810b212fb314bf58c3dd36796de0feeefaf26be20c6a2fd00517152c58d0b1a95775ef6a1374c608f55f416b78b8c81761f1d*33:1::to-submit-challenges.txt", "wachtwoord"},
{"$RAR3$*1*9759543e04fe3a22*834015cd*384*693*1*cdd2e2478e5153a581c47a201490f5d9b69e01584ae488a2a40203da9ba8c5271ed8edc8f91a7bd262bb5e5de07ecbe9e2003d054a314d16caf2ea1de9f54303abdee1ed044396f7e29c40c38e638f626442efd9f511b4743758cd4a6025c5af81d1252475964937d80bfd50d10c171e7e4041a66c02a74b2b451ae83b6807990fb0652a8cdab530c5a0c497575a6e6cbe2db2035217fe849d2e0b8693b70f3f97b757229b4e89c8273197602c23cc04ff5f24abf3d3c7eb686fc3eddce1bfe710cc0b6e8bd012928127da38c38dd8f056095982afacb4578f6280d51c6739739e033674a9413ca88053f8264c5137d4ac018125c041a3489daaf175ef75e9282d245b92948c1bbcf1c5f25b7028f6d207d87fe9598c2c7ccd1553e842a91ab8ca9261a51b14601a756070388d08039466dfa36f0b4c7ea7dd9ff25c9d98687203c58f9ec8757cafe4d2ed785d5a9e6d5ea838e4cc246a9e6d3c30979dcce56b380b05f9103e6443b35357550b50229c47f845a93a48602790096828d9d6bef0*33:1::to-submit-challenges.txt", "Sleepingbaby210"},
{"$RAR3$*1*79e17c26407a7d52*834015cd*384*693*1*6844a189e732e9390b5a958b623589d5423fa432d756fd00940ac31e245214983507a035d4e0ee09469491551759a66c12150fe6c5d05f334fb0d8302a96d48ef4da04954222e0705507aaa84f8b137f284dbec344eee9cea6b2c4f63540c64df3ee8be3013466d238c5999e9a98eb6375ec5462869bba43401ec95077d0c593352339902c24a3324178e08fe694d11bfec646c652ffeafbdda929052c370ffd89168c83194fedf7c50fc7d9a1fbe64332063d267a181eb07b5d70a5854067db9b66c12703fde62728d3680cf3fdb9933a0f02bfc94f3a682ad5e7c428d7ed44d5ff554a8a445dea28b81e3a2631870e17f3f3c0c0204136802c0701590cc3e4c0ccd9f15e8be245ce9caa6969fab9e8443ac9ad9e73e7446811aee971808350c38c16c0d3372c7f44174666d770e3dd321e8b08fb2dc5e8a6a5b2a1720bad66e54abc194faabc5f24225dd8fee137ba5d4c2ed48c6462618e6333300a5b8dfc75c65608925e786eb0988f7b3a5ab106a55168d1001adc47ce95bba77b38c35b*33:1::to-submit-challenges.txt", "P-i-r-A-T-E"},
{"$RAR3$*1*e1df79fd9ee1dadf*771a163b*64*39*1*edc483d67b94ab22a0a9b8375a461e06fa1108fa72970e16d962092c311970d26eb92a033a42f53027bdc0bb47231a12ed968c8d530a9486a90cbbc00040569b*33", "333"},
{"$RAR3$*1*c83c00534d4af2db*771a163b*64*39*1*05244526d6b32cb9c524a15c79d19bba685f7fc3007a9171c65fc826481f2dce70be6148f2c3497f0d549aa4e864f73d4e4f697fdb66ff528ed1503d9712a414*33", "11eleven111"},
{"$RAR3$*0*345f5f573a077ad7*638e388817cc7851e313406fd77730b9", "Boustrophedon"},
{"$RAR3$*0*c9dea41b149b53b4*fcbdb66122d8ebdb32532c22ca7ab9ec", "password"},
{"$RAR3$*0*7ce241baa2bd521b*f2b26d76424efa351c728b321671d074", "@"},
{"$RAR3$*0*ea0ea55ce549c8ab*cf89099c620fcc244bdcbae55a616e76", "ow"},
{"$RAR3$*0*ea0ea55ce549c8ab*6a35a76b1ce9ddc4229b9166d60dc113", "aes"},
{"$RAR3$*0*ea0ea55ce549c8ab*1830771da109f53e2d6e626be16c2666", "sha1"},
{"$RAR3$*0*7e52d3eba9bad316*ee8e1edd435cfa9b8ab861d958a4d588", "fiver"},
{"$RAR3$*0*7e52d3eba9bad316*01987735ab0be7b6538470bd5f5fbf80", "magnum"},
{"$RAR3$*0*7e52d3eba9bad316*f2fe986ed266c6617c48d04a429cf2e3", "7777777"},
{"$RAR3$*0*7e52d3eba9bad316*f0ad6e7fdff9f82fff2aa990105fde21", "password"},
{"$RAR3$*0*7ce241baa2bd521b*3eb0017fa8843017952c53a3ac8332b6", "nine9nine"},
{"$RAR3$*0*7ce241baa2bd521b*ccbf0c3f8e059274606f33cc388b8a2f", "10tenten10"},
{"$RAR3$*0*5fa43f823a60da63*af2630863e12046e42c4501c915636c9", "eleven11111"},
{"$RAR3$*0*5fa43f823a60da63*88c0840d0bd98844173d35f867558ec2", "twelve121212"},
{"$RAR3$*0*4768100a172fa2b6*48edcb5283ee2e4f0e8edb25d0d85eaa", "subconsciousness"},
#endif
{NULL}
};
#endif
/* Parsed representation of one "$RAR3$*..." hash line. Everything from
   `salt` onward participates in dyna_salt comparison (see the
   salt_cmp_offset/salt_cmp_size setup in get_salt()). */
typedef struct {
	dyna_salt dsalt; /* must be first. allows dyna_salt to work */
	/* place all items we are NOT going to use for salt comparison, first */
	unsigned char *blob; /* encrypted data: points at raw_data or an mmap'd archive region */
	/* data from this point on, is part of the salt for compare reasons */
	unsigned char salt[8];
	int type; /* 0 = -hp, 1 = -p */
	/* for rar -p mode only: */
	union {
		unsigned int w;  /* CRC as one word (byte-swapped on big-endian, see get_salt) */
		unsigned char c[4]; /* CRC as raw bytes, as parsed from the hash line */
	} crc;
	unsigned long long pack_size; /* size of the encrypted (packed) data in bytes */
	unsigned long long unp_size;  /* expected size after decompression */
	int method; /* RAR packing method byte; 0x30 means stored (no compression) */
	unsigned char blob_hash[20]; // holds an sha1, but could be 'any' hash.
	// raw_data should be word aligned, and 'ok'
	unsigned char raw_data[1]; /* flexible-array idiom: inlined ciphertext lives here */
} rarfile;
static rarfile *cur_file;
#undef set_key
/* Stores candidate password `key` in slot `index`: UTF-16LE encodes it
   into the flat saved_key buffer and records its byte length. */
static void set_key(char *key, int index)
{
	UTF16 utf16buf[PLAINTEXT_LENGTH + 1];
	int len16;

	/* UTF-16LE encode the password, encoding aware */
	len16 = enc_to_utf16(utf16buf, PLAINTEXT_LENGTH, (UTF8*)key, strlen(key));
	if (len16 < 0)
		len16 = strlen16(utf16buf);

	memcpy(&saved_key[index * UNICODE_LENGTH], utf16buf, UNICODE_LENGTH);
	saved_len[index] = len16 * 2; /* length in bytes, not UTF-16 units */
#ifdef RAR_OPENCL_FORMAT
	new_keys = 1;
#endif
}
/* Parses a "$RAR3$*type*salt*..." ciphertext line into a heap-allocated
 * rarfile and returns a pointer to a static cell holding the rarfile*.
 * For -p mode (type 1) the encrypted blob is either inlined in the hash
 * line or loaded from the referenced archive file (mmap'd when HAVE_MMAP).
 * The blob's SHA-1 is stored so dyna_salt can compare salts cheaply. */
static void *get_salt(char *ciphertext)
{
	unsigned int i, type, ex_len;
	static unsigned char *ptr;
	/* extract data from "salt" */
	char *encoded_salt;
	char *saltcopy = strdup(ciphertext);
	char *keep_ptr = saltcopy;
	rarfile *psalt;
	unsigned char tmp_salt[8];
	int inlined = 1;
	SHA_CTX ctx;
	/* one static cell, reused across calls; it stores the rarfile pointer */
	if (!ptr) ptr = mem_alloc_tiny(sizeof(rarfile*),sizeof(rarfile*));
	saltcopy += FORMAT_TAG_LEN;	/* skip over "$RAR3$*" */
	type = atoi(strtokm(saltcopy, "*"));
	encoded_salt = strtokm(NULL, "*");
	/* decode the 16 hex chars of salt into 8 raw bytes */
	for (i = 0; i < 8; i++)
		tmp_salt[i] = atoi16[ARCH_INDEX(encoded_salt[i * 2])] * 16 + atoi16[ARCH_INDEX(encoded_salt[i * 2 + 1])];
	if (type == 0) {	/* rar-hp mode */
		/* -hp mode: 16 bytes of encrypted known plaintext follow inline */
		char *encoded_ct = strtokm(NULL, "*");
		psalt = mem_calloc(1, sizeof(*psalt)+16);
		psalt->type = type;
		ex_len = 16;
		memcpy(psalt->salt, tmp_salt, 8);
		for (i = 0; i < 16; i++)
			psalt->raw_data[i] = atoi16[ARCH_INDEX(encoded_ct[i * 2])] * 16 + atoi16[ARCH_INDEX(encoded_ct[i * 2 + 1])];
		psalt->blob = psalt->raw_data;
		psalt->pack_size = 16;
	} else {
		/* -p mode: fields are CRC, pack_size, unp_size, inlined-flag */
		char *p = strtokm(NULL, "*");
		char crc_c[4];
		unsigned long long pack_size;
		unsigned long long unp_size;
		for (i = 0; i < 4; i++)
			crc_c[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])];
		pack_size = atoll(strtokm(NULL, "*"));
		unp_size = atoll(strtokm(NULL, "*"));
		inlined = atoi(strtokm(NULL, "*"));
		ex_len = pack_size;
		/* load ciphertext. We allocate and load all files
		   here, and they are freed when password found. */
#if HAVE_MMAP
		/* with mmap, non-inlined blobs live in the mapping, so no extra
		   trailing storage is needed in that case */
		psalt = mem_calloc(1, sizeof(*psalt) + (inlined ? ex_len : 0));
#else
		psalt = mem_calloc(1, sizeof(*psalt) + ex_len);
#endif
		psalt->type = type;
		memcpy(psalt->salt, tmp_salt, 8);
		psalt->pack_size = pack_size;
		psalt->unp_size = unp_size;
		memcpy(psalt->crc.c, crc_c, 4);
		if (inlined) {
			/* hex-decode the packed data straight from the hash line */
			unsigned char *d = psalt->raw_data;
			p = strtokm(NULL, "*");
			for (i = 0; i < psalt->pack_size; i++)
				*d++ = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])];
			psalt->blob = psalt->raw_data;
		} else {
			/* blob stored in the original archive: fields are
			   archive file name and byte offset */
			FILE *fp;
			char *archive_name = strtokm(NULL, "*");
			long long pos = atoll(strtokm(NULL, "*"));
#if HAVE_MMAP
			if (!(fp = fopen(archive_name, "rb"))) {
				fprintf(stderr, "! %s: %s\n", archive_name,
				        strerror(errno));
				error();
			}
#ifdef DEBUG
			fprintf(stderr, "RAR mmap() len "LLu" offset 0\n",
			        pos + psalt->pack_size);
#endif
			/* map from file start and advance the pointer, rather
			   than mapping at `pos` (which may not be page-aligned) */
			psalt->blob = mmap(NULL, pos + psalt->pack_size,
			                   PROT_READ, MAP_SHARED,
			                   fileno(fp), 0);
			if (psalt->blob == MAP_FAILED) {
				fprintf(stderr, "Error loading file from "
				        "archive '%s'. Archive possibly "
				        "damaged.\n", archive_name);
				error();
			}
			psalt->blob += pos;
#else
			size_t count;
			if (!(fp = fopen(archive_name, "rb"))) {
				fprintf(stderr, "! %s: %s\n", archive_name, strerror(errno));
				error();
			}
			jtr_fseek64(fp, pos, SEEK_SET);
			count = fread(psalt->raw_data, 1, psalt->pack_size, fp);
			if (count != psalt->pack_size) {
				fprintf(stderr, "Error loading file from archive '%s', expected "LLu" bytes, got "Zu". Archive possibly damaged.\n", archive_name, psalt->pack_size, count);
				error();
			}
			psalt->blob = psalt->raw_data;
#endif
			fclose(fp);
		}
		/* final field: RAR packing method (0x30 == stored) */
		p = strtokm(NULL, "*");
		psalt->method = atoi16[ARCH_INDEX(p[0])] * 16 + atoi16[ARCH_INDEX(p[1])];
		/* for compressed entries the stored CRC is kept complemented
		   (and word-swapped on big-endian) for later comparison */
		if (psalt->method != 0x30)
#if ARCH_LITTLE_ENDIAN
			psalt->crc.w = ~psalt->crc.w;
#else
			psalt->crc.w = JOHNSWAP(~psalt->crc.w);
#endif
	}
	/* hash the blob so dyna_salt comparisons don't need the full data */
	SHA1_Init(&ctx);
	SHA1_Update(&ctx, psalt->blob, psalt->pack_size);
	SHA1_Final(psalt->blob_hash, &ctx);
	MEM_FREE(keep_ptr);
#if HAVE_MMAP
	/* mmap'd blobs must not be free()d; only inlined ones own their memory */
	psalt->dsalt.salt_alloc_needs_free = inlined;
#else
	psalt->dsalt.salt_alloc_needs_free = 1;
#endif
	/* compare salts from `salt` up to (but excluding) raw_data */
	psalt->dsalt.salt_cmp_offset = SALT_CMP_OFF(rarfile, salt);
	psalt->dsalt.salt_cmp_size = SALT_CMP_SIZE(rarfile, salt, raw_data, 0);
	memcpy(ptr, &psalt, sizeof(rarfile*));
	return (void*)ptr;
}
/*
 * Install the current salt for cracking. 'salt' is the pointer-to-rarfile
 * record produced by get_salt() (it memcpy'd a rarfile* into the salt blob),
 * so we dereference one level to get the archive entry.
 */
static void set_salt(void *salt)
{
cur_file = *((rarfile**)salt);
/* Cache the 8-byte RAR salt locally for the kernels */
memcpy(saved_salt, cur_file->salt, 8);
#ifdef RAR_OPENCL_FORMAT
/* Async upload; a later sync point must exist before the kernel runs —
   presumably in crypt_all (not visible here). */
HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], cl_salt, CL_FALSE,
0, 8, saved_salt, 0, NULL, NULL),
"failed in clEnqueueWriteBuffer saved_salt");
#endif
}
/*
 * Validate a $RAR3$ ciphertext line without fully loading it.
 * Format: $RAR3$*mode*salt*[known-plain | crc*pack*unp*inlined*(data|file*pos)*method]
 * Returns 1 if the hash looks parseable, 0 otherwise.
 *
 * Fix vs. original: in the non-inlined branch, a missing "pos" token jumped
 * to error without closing the archive FILE*, leaking a descriptor per
 * malformed line.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy, *ptr, *keeptr;
	int mode, extra;

	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
		return 0;
	if (!(ctcopy = strdup(ciphertext))) {
		fprintf(stderr, "Memory allocation failed in %s, unable to check if hash is valid!", FORMAT_LABEL);
		return 0;
	}
	keeptr = ctcopy;
	ctcopy += FORMAT_TAG_LEN;
	if (!(ptr = strtokm(ctcopy, "*"))) /* -p or -h mode */
		goto error;
	if (strlen(ptr) != 1 || !isdec(ptr))
		goto error;
	mode = atoi(ptr);
	if (mode > 1)
		goto error;
	if (!(ptr = strtokm(NULL, "*"))) /* salt */
		goto error;
	if (hexlenl(ptr, &extra) != 16 || extra) /* 8 bytes of salt */
		goto error;
	if (!(ptr = strtokm(NULL, "*")))
		goto error;
	if (mode == 0) {
		/* rar -hp mode: only 16 bytes of encrypted known plaintext follow */
		if (hexlenl(ptr, &extra) != 32 || extra)
			goto error;
		MEM_FREE(keeptr);
		return 1;
	} else {
		int inlined;
		long long plen, ulen;

		if (hexlenl(ptr, &extra) != 8 || extra) /* 4 bytes of CRC */
			goto error;
		if (!(ptr = strtokm(NULL, "*"))) /* pack_size */
			goto error;
		if (strlen(ptr) > 12) { // pack_size > 1 TB? Really?
			static int warn_once_pack_size = 1;

			if (warn_once_pack_size) {
				fprintf(stderr, "pack_size > 1TB not supported (%s)\n", FORMAT_NAME);
				warn_once_pack_size = 0;
			}
			goto error;
		}
		if ((plen = atoll(ptr)) < 16) /* need at least one AES block */
			goto error;
		if (!(ptr = strtokm(NULL, "*"))) /* unp_size */
			goto error;
		if (strlen(ptr) > 12) {
			static int warn_once_unp_size = 1;

			if (warn_once_unp_size) {
				fprintf(stderr, "unp_size > 1TB not supported (%s)\n", FORMAT_NAME);
				warn_once_unp_size = 0;
			}
			goto error;
		}
		if ((ulen = atoll(ptr)) < 1)
			goto error;
		if (!(ptr = strtokm(NULL, "*"))) /* inlined */
			goto error;
		if (strlen(ptr) != 1 || !isdec(ptr))
			goto error;
		inlined = atoi(ptr);
		if (inlined > 1)
			goto error;
		if (!(ptr = strtokm(NULL, "*"))) /* pack_size / archive_name */
			goto error;
		if (inlined) {
			/* hex data must match the declared packed size exactly */
			if (hexlenl(ptr, &extra) != plen * 2 || extra)
				goto error;
		} else {
			FILE *fp;
			char *archive_name;

			archive_name = ptr;
			if (!(fp = fopen(archive_name, "rb"))) {
				if (!ldr_in_pot)
					fprintf(stderr, "! %s: %s, skipping.\n", archive_name, strerror(errno));
				goto error;
			}
			if (!(ptr = strtokm(NULL, "*"))) { /* pos */
				fclose(fp); /* fix: don't leak the handle on a truncated line */
				goto error;
			}
			/* We could go on and actually try seeking to pos
			   but this is enough for now */
			fclose(fp);
		}
		if (!(ptr = strtokm(NULL, "*"))) /* method */
			goto error;
	}
	MEM_FREE(keeptr);
	return 1;

error:
#ifdef RAR_DEBUG
	{
		char buf[68];
		strnzcpy(buf, ciphertext, sizeof(buf));
		fprintf(stderr, "rejecting %s\n", buf);
	}
#endif
	MEM_FREE(keeptr);
	return 0;
}
/*
 * Return the candidate at 'index' converted back to the target encoding.
 * saved_key holds fixed-stride UTF-16 candidates; saved_len[index] is the
 * byte length, so >> 1 is the UTF-16 code-unit count where we place the
 * terminating NUL (2 zero bytes) before converting.
 */
static char *get_key(int index)
{
UTF16 tmpbuf[PLAINTEXT_LENGTH + 1];

memcpy(tmpbuf, &((UTF16*) saved_key)[index * PLAINTEXT_LENGTH], saved_len[index]);
memset(&tmpbuf[saved_len[index] >> 1], 0, 2);
return (char*) utf16_to_enc(tmpbuf);
}
#define ADD_BITS(n) \
{ \
if (bits < 9) { \
hold |= ((unsigned int)*next++ << (24 - bits)); \
bits += 8; \
} \
hold <<= n; \
bits -= n; \
}
/*
* This function is loosely based on JimF's check_inflate_CODE2() from
* pkzip_fmt. Together with the other bit-checks, we are rejecting over 96%
* of the candidates without resorting to a slow full check (which in turn
* may reject semi-early, especially if it's a PPM block)
*
* Input is first 16 bytes of RAR buffer decrypted, as-is. It also contain the
* first 2 bits, which have already been decoded, and have told us we had an
* LZ block (RAR always use dynamic Huffman table) and keepOldTable was not set.
*
* RAR use 20 x (4 bits length, optionally 4 bits zerocount), and reversed
* byte order.
*/
/*
 * Early-reject test on a candidate LZ block: read the 20 pre-table code
 * lengths from the decrypted buffer and verify they form a valid Huffman
 * code (Kraft inequality must be met with equality). Returns 1 if the
 * table is plausible, 0 to reject the candidate.
 */
static MAYBE_INLINE int check_huffman(unsigned char *next) {
unsigned int bits, hold, i;
int left;
unsigned int ncount[4];
unsigned char *count = (unsigned char*)ncount;
unsigned char bit_length[20];
#ifdef DEBUG
unsigned char *was = next;
#endif

/* Load the first 32 bits big-endian into 'hold' (bitstream is MSB-first) */
#if ARCH_LITTLE_ENDIAN && ARCH_ALLOWS_UNALIGNED
hold = JOHNSWAP(*(unsigned int*)next);
#else
hold = next[3] + (((unsigned int)next[2]) << 8) +
(((unsigned int)next[1]) << 16) +
(((unsigned int)next[0]) << 24);
#endif
next += 4; // we already have the first 32 bits
hold <<= 2; // we already processed 2 bits, PPM and keepOldTable
bits = 32 - 2;

/* First, read 20 pairs of (bitlength[, zerocount]) */
for (i = 0 ; i < 20 ; i++) {
int length, zero_count;

/* lengths are 4 bits each, taken from the top of 'hold' */
length = hold >> 28;
ADD_BITS(4);
if (length == 15) {
/* 15 is an escape: next nibble is a run-length of zero lengths */
zero_count = hold >> 28;
ADD_BITS(4);
if (zero_count == 0) {
bit_length[i] = 15;
} else {
zero_count += 2;
while (zero_count-- > 0 &&
i < sizeof(bit_length) /
sizeof(bit_length[0]))
bit_length[i++] = 0;
i--; /* compensate the outer loop's i++ */
}
} else {
bit_length[i] = length;
}
}
#ifdef DEBUG
if (next - was > 16) {
fprintf(stderr, "*** (possible) BUG: check_huffman() needed %u bytes, we only have 16 (bits=%d, hold=0x%08x)\n", (int)(next - was), bits, hold);
dump_stuff_msg("complete buffer", was, 16);
error();
}
#endif
/* Count the number of codes for each code length */
memset(count, 0, 16);
for (i = 0; i < 20; i++) {
++count[bit_length[i]];
}
count[0] = 0; /* length 0 means "symbol absent", not a code */
if (!ncount[0] && !ncount[1] && !ncount[2] && !ncount[3])
return 0; /* No codes at all */

/* Kraft inequality walk: 'left' is remaining code space per length */
left = 1;
for (i = 1; i < 16; ++i) {
left <<= 1;
left -= count[i];
if (left < 0) {
return 0; /* over-subscribed */
}
}
if (left) {
return 0; /* incomplete set */
}
return 1; /* Passed this check! */
}
/*
 * Report whether any candidate in the batch cracked. The real work was
 * already done elsewhere; this just scans the per-index result flags.
 */
static int cmp_all(void *binary, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (cracked[i])
			return 1;
	}
	return 0;
}
/* Per-candidate result: cracked[] was filled by the batch check (not
   visible in this chunk — presumably crypt_all/check_rar). */
static int cmp_one(void *binary, int index)
{
return cracked[index];
}
/* Always true: the earlier check is already exact, nothing more to verify. */
static int cmp_exact(char *source, int index)
{
return 1;
}
/*
 * Verify 'count' derived keys against the current archive entry (cur_file),
 * setting cracked[index] per candidate. aes_key/aes_iv hold 16 bytes per
 * index, produced earlier by the KDF (not visible in this chunk).
 * Three paths: -hp header check, stored-file CRC check, compressed-file
 * early-reject + full unpack.
 */
static inline void check_rar(int count)
{
unsigned int index;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index++) {
AES_KEY aes_ctx;
unsigned char *key = &aes_key[index * 16];
unsigned char *iv = &aes_iv[index * 16];

AES_set_decrypt_key(key, 128, &aes_ctx);

/* AES decrypt, uses aes_iv, aes_key and blob */
if (cur_file->type == 0) { /* rar-hp mode */
unsigned char plain[16];

/* A good key reveals RAR's fixed 7-byte header magic */
AES_cbc_encrypt(cur_file->blob, plain, 16,
&aes_ctx, iv, AES_DECRYPT);
cracked[index] = !memcmp(plain, "\xc4\x3d\x7b\x00\x40\x07\x00", 7);
} else {
if (cur_file->method == 0x30) { /* stored, not deflated */
CRC32_t crc;
unsigned char crc_out[4];
unsigned char plain[0x8000];
unsigned long long size = cur_file->unp_size;
unsigned char *cipher = cur_file->blob;

/* Use full decryption with CRC check.
Compute CRC of the decompressed plaintext */
CRC32_Init(&crc);
/* Decrypt/accumulate in 32 KiB chunks; CBC state is carried in 'iv' */
while (size) {
unsigned int inlen = (size > 0x8000) ? 0x8000 : size;

AES_cbc_encrypt(cipher, plain, inlen,
&aes_ctx, iv, AES_DECRYPT);
CRC32_Update(&crc, plain, inlen);
size -= inlen;
cipher += inlen;
}
CRC32_Final(crc_out, crc);

/* Compare computed CRC with stored CRC */
cracked[index] = !memcmp(crc_out, &cur_file->crc.c, 4);
} else {
const int solid = 0;
unpack_data_t *unpack_t;
unsigned char plain[20];
unsigned char pre_iv[16];

cracked[index] = 0;

/* Use a scratch IV so the real one is intact for the full check */
memcpy(pre_iv, iv, 16);

/* Decrypt just one block for early rejection */
AES_cbc_encrypt(cur_file->blob, plain, 16,
&aes_ctx, pre_iv, AES_DECRYPT);

/* Early rejection */
if (plain[0] & 0x80) {
// PPM checks here.
if (!(plain[0] & 0x20) || // Reset bit must be set
(plain[1] & 0x80)) // MaxMB must be < 128
goto bailOut;
} else {
// LZ checks here.
if ((plain[0] & 0x40) || // KeepOldTable can't be set
!check_huffman(plain)) // Huffman table check
goto bailOut;
}

/* Reset stuff for full check */
AES_set_decrypt_key(key, 128, &aes_ctx);
#ifdef _OPENMP
unpack_t = &unpack_data[omp_get_thread_num()];
#else
unpack_t = unpack_data;
#endif
unpack_t->max_size = cur_file->unp_size;
unpack_t->dest_unp_size = cur_file->unp_size;
unpack_t->pack_size = cur_file->pack_size;
unpack_t->iv = iv;
unpack_t->ctx = &aes_ctx;
unpack_t->key = key;

/* Full decompress; match on the unpacked-data CRC */
if (rar_unpack29(cur_file->blob, solid, unpack_t))
cracked[index] = !memcmp(&unpack_t->unp_crc, &cur_file->crc.c, 4);
bailOut:;
}
}
}
}
|
QuadtreePolarEuclid.h | /*
* Quadtree.h
*
* Created on: 21.05.2014
* Author: Moritz v. Looz (moritz.looz-corswarem@kit.edu)
*/
#ifndef QUADTREEPOLAREUCLID_H_
#define QUADTREEPOLAREUCLID_H_
#include <vector>
#include <memory>
#include <cmath>
#include <omp.h>
#include <functional>
#include "QuadNodePolarEuclid.h"
namespace NetworKit {
/**
 * Polar quadtree over a disc of radius maxRadius, using Euclidean distances.
 * All spatial logic is delegated to the root QuadNodePolarEuclid; this class
 * is a thin owner/facade around it.
 */
template <class T>
class QuadtreePolarEuclid {
friend class QuadTreePolarEuclidGTest;
public:
/** Default tree over the unit disc. */
QuadtreePolarEuclid() {
root = QuadNodePolarEuclid<T>();
this->maxRadius = 1;
}

/**
 * @param maxR Radius of the managed area. Must be smaller than 1.
 * @param theoreticalSplit If true, split cells to get the same area in each child cell. Default is false
 * @param alpha dispersion Parameter of the point distribution. Only has an effect if theoretical split is true
 * @param capacity How many points can inhabit a leaf cell before it is split up?
 *
 */
QuadtreePolarEuclid(double maxR,bool theoreticalSplit=false, double alpha=1, count capacity=1000, double balance = 0.5) {
root = QuadNodePolarEuclid<T>(0, 0, 2*M_PI, maxR, capacity, 0,theoreticalSplit,alpha,balance);
this->maxRadius = maxR;
}

/**
 * Bulk construction from parallel coordinate/content vectors.
 * maxRadius is grown to just past the largest input radius so every
 * point fits strictly inside the root cell.
 */
QuadtreePolarEuclid(const vector<double> &angles, const vector<double> &radii, const vector<T> &content, bool theoreticalSplit=false, count capacity=1000, double balance = 0.5) {
const count n = angles.size();
assert(angles.size() == radii.size());
assert(radii.size() == content.size());
maxRadius = 0;
for (double radius : radii) {
if (radius > maxRadius) maxRadius = radius;
}
maxRadius = std::nextafter(maxRadius, std::numeric_limits<double>::max());
// NOTE(review): this ctor passes (…, capacity, theoreticalSplit, balance)
// while the one above passes (…, capacity, 0, theoreticalSplit, alpha, balance)
// — looks like a different QuadNodePolarEuclid overload; confirm argument
// meanings against QuadNodePolarEuclid.h.
root = QuadNodePolarEuclid<T>(0, 0, 2*M_PI, maxRadius, capacity, theoreticalSplit,balance);
for (index i = 0; i < n; i++) {
assert(content[i] < n);
root.addContent(content[i], angles[i], radii[i]);
}
}

/**
 * @param newcomer content to be added at point x
 * @param angle angular coordinate of x
 * @param R radial coordinate of x
 */
void addContent(T newcomer, double angle, double r) {
root.addContent(newcomer, angle, r);
}

/**
 * @param newcomer content to be removed at point x
 * @param angle angular coordinate of x
 * @param R radial coordinate of x
 * @return true iff the element was found and removed
 */
bool removeContent(T toRemove, double angle, double r) {
return root.removeContent(toRemove, angle, r);
}

/**
 * Get all elements, regardless of position
 *
 * @return vector<T> of elements
 */
vector<T> getElements() const {
return root.getElements();
}

/** Append all stored coordinates into the given containers. */
void extractCoordinates(vector<double> &anglesContainer, vector<double> &radiiContainer) const {
root.getCoordinates(anglesContainer, radiiContainer);
}

/** Collect all elements within Euclidean distance 'radius' of circleCenter. */
void getElementsInEuclideanCircle(const Point2D<double> circleCenter, const double radius, vector<T> &circleDenizens) const {
root.getElementsInEuclideanCircle(circleCenter, radius, false, circleDenizens);
}

/** Sample elements with probability prob(distance to euQuery); returns count kept. */
count getElementsProbabilistically(Point2D<double> euQuery, std::function<double(double)> prob, vector<T> &circleDenizens) {
return root.getElementsProbabilistically(euQuery, prob, false, circleDenizens);
}

count getElementsProbabilistically(Point2D<double> euQuery, std::function<double(double)> prob, bool suppressLeft, vector<T> &circleDenizens) {
return root.getElementsProbabilistically(euQuery, prob, suppressLeft, circleDenizens);
}

/** Recompute cached subtree sizes. */
void recount() {
root.recount();
}

count size() const {
return root.size();
}

count height() const {
return root.height();
}

count countLeaves() const {
return root.countLeaves();
}

/** Assign consecutive ids to all cells, starting at nextID; returns next free id. */
index indexSubtree(index nextID) {
return root.indexSubtree(nextID);
}

/** Id of the leaf cell containing polar point (phi, r). */
index getCellID(double phi, double r) const {
return root.getCellID(phi, r);
}

double getMaxRadius() const {
return maxRadius;
}

/** Reindex cells in parallel; single task spawns, workers steal via OpenMP tasks. */
void reindex() {
#pragma omp parallel
{
#pragma omp single nowait
{
root.reindex(0);
}
}
}

/**
 * trims the vectors used to hold the content in the leaf cells. Reduces memory usage, makes changes slower
 */
void trim() {
root.trim();
}

private:
QuadNodePolarEuclid<T> root;
double maxRadius;
};
}
#endif /* QUADTREEPOLAREUCLID_H_ */
|
GB_unop__identity_int16_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_int16_fp64
// op(A') function: GB_unop_tran__identity_int16_fp64
// C type: int16_t
// A type: double
// cast: int16_t cij = GB_cast_to_int16_t ((double) (aij))
// unaryop: cij = aij
#define GB_ATYPE \
double
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int16_t z = GB_cast_to_int16_t ((double) (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int16_t z = GB_cast_to_int16_t ((double) (aij)) ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/*
 * Cx = (int16_t) Ax: apply the identity op with a double -> int16 cast.
 * Auto-generated kernel — fix issues in the Generator template, not here.
 */
GrB_Info GB_unop_apply__identity_int16_fp64
(
int16_t *Cx, // Cx and Ax may be aliased
const double *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
/* dense/sparse case: every entry is present */
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
double aij = Ax [p] ;
int16_t z = GB_cast_to_int16_t ((double) (aij)) ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
double aij = Ax [p] ;
int16_t z = GB_cast_to_int16_t ((double) (aij)) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/*
 * C = (int16_t) A': transpose + cast. The entire body lives in the shared
 * template GB_unop_transpose.c, specialized by the GB_* macros above.
 */
GrB_Info GB_unop_tran__identity_int16_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
matrix.c | #include "matrix.h"
#include <omp.h>
/*
 * Allocate a rows x cols matrix whose storage is padded so both dimensions
 * are multiples of 4 (one SSE vector) and aligned to ALIGN. All elements are
 * zero-initialized. Returns NULL on allocation failure (the original
 * dereferenced unchecked malloc results).
 */
struct matrix* create_matrix(int rows, int cols) {
	struct matrix* m = malloc(sizeof(struct matrix));
	if (m == NULL)
		return NULL;
	m->rows_ = rows;
	m->cols_ = cols;
	m->rows_align_ = (rows % 4 == 0 ? 0 : 4 - (rows % 4)); //calculate an alignment
	m->cols_align_ = (cols % 4 == 0 ? 0 : 4 - (cols % 4));
	//allocate memory with alignment
	m->matrix_ = _mm_malloc((m->rows_ + m->rows_align_) * (m->cols_ + m->cols_align_) * sizeof(float), ALIGN);
	if (m->matrix_ == NULL) {
		free(m);
		return NULL;
	}
	const int order = (m->rows_ + m->rows_align_) * (m->cols_ + m->cols_align_);
#pragma omp parallel for
	for(int i = 0; i < order; i++) {
		m->matrix_[i] = 0;
	}
	return m;
}
/*
 * Read rows*cols floats from 'in' in row-major order.
 * Fix: the original ignored fscanf's return value, so a short or malformed
 * input silently left elements at their previous values; we now stop and
 * report the first failing position.
 */
void get_matrix(struct matrix* m, FILE* in) {
	const int rows = m->rows_;
	const int cols = m->cols_;
	for(int i = 0; i < rows; i++)
		for(int j = 0; j < cols; ++j) {
			if (fscanf(in, "%f", get_element(m, i, j)) != 1) {
				fprintf(stderr, "matrix read failed at (%d, %d)\n", i, j);
				return;
			}
		}
}
/* Write the logical (unpadded) matrix to 'out': one row per line, four
 * decimal places per element, followed by a blank line. */
void print_matrix(struct matrix* m, FILE* out) {
	const int nrows = m->rows_;
	const int ncols = m->cols_;
	for(int r = 0; r < nrows; ++r) {
		for(int c = 0; c < ncols; ++c)
			fprintf(out, "%.4f ", *get_element(m, r, c));
		fprintf(out, "\n");
	}
	fprintf(out, "\n");
}
/*
 * Create a rows x cols matrix with uniform random values in [0, range].
 * Fix: the original called rand() (shared hidden state, not thread-safe)
 * inside an OpenMP parallel loop — a data race. We use rand_r with an
 * independent seed per row instead, which is both race-free and makes the
 * srand(time(NULL)) call unnecessary.
 */
struct matrix* gen_matrix(int rows, int cols, float range) {
	struct matrix* tmp = create_matrix(rows, cols);
	const unsigned int base_seed = (unsigned int)time(NULL);
#pragma omp parallel for
	for(int i = 0; i < tmp->rows_; i++) {
		/* decorrelate rows with a Knuth multiplicative hash of the index */
		unsigned int seed = base_seed ^ (2654435761u * (unsigned int)(i + 1));
		for(int j = 0; j < tmp->cols_; j++) {
			*get_element(tmp, i, j) = ((float)rand_r(&seed) / (float)(RAND_MAX)) * range;
		}
	}
	return tmp;
}
/* Release the aligned element buffer and the descriptor; NULL is a no-op. */
void free_matrix(struct matrix* m) {
	if(!m)
		return;
	_mm_free(m->matrix_);
	free(m);
}
/*
 * Copy src's padded storage and dimensions into dst.
 * NOTE(review): assumes dst->matrix_ was allocated at least as large as
 * src's padded size — nothing here reallocates, so copying into a smaller
 * matrix overruns the buffer. Confirm all callers allocate matching sizes.
 */
void copy_matrix(struct matrix* dst, struct matrix* src) {
memcpy(dst->matrix_, src->matrix_, (src->rows_ + src->rows_align_) * (src->cols_ + src->cols_align_) * sizeof(float));
dst->rows_ = src->rows_;
dst->cols_ = src->cols_;
dst->rows_align_ = src->rows_align_;
dst->cols_align_ = src->cols_align_;
}
/* Return a newly allocated transpose of m (caller frees with free_matrix). */
struct matrix* transpose_matrix(struct matrix* m) {
	struct matrix* result = create_matrix(m->cols_, m->rows_);
	const int nrows = m->rows_;
	const int ncols = m->cols_;
#pragma omp parallel for
	for(int r = 0; r < nrows; ++r) {
		for (int c = 0; c < ncols; ++c) {
			*get_element(result, c, r) = *get_element(m, r, c);
		}
	}
	return result;
}
/* a -= b elementwise over the padded storage, 4 floats per SSE op.
 * Both matrices must share the same padded layout. */
void sub_matrices(struct matrix* a, struct matrix* b) {
	const int nvec = (a->rows_ + a->rows_align_) * (a->cols_ + a->cols_align_) / 4;
	__m128* va = (__m128*)(a->matrix_);
	__m128* vb = (__m128*)(b->matrix_);
#pragma omp parallel for
	for(int i = 0; i < nvec; ++i)
		va[i] = _mm_sub_ps(va[i], vb[i]);
}
/*
 * c += a * b using SSE dot products. b is transposed first so both operands
 * of each dot product are traversed contiguously (row-major).
 * NOTE(review): accumulates with += — callers must pass a zeroed c
 * (create_matrix zero-fills, so a fresh matrix is fine). Also assumes a's
 * padded column count equals tr_b's padded column count.
 */
void mul_matrices(struct matrix* a, struct matrix* b, struct matrix* c) {
register int rows = a->rows_;
register int cols = b->cols_;
/* number of 4-float vectors per row of a (and per row of tr_b) */
register int real_order = (a->cols_ + a->cols_align_) / 4;
struct matrix* tr_b = transpose_matrix(b); //transpose right matrix for better performance of multiplication
register int ans_real_order = (c->cols_ + c->cols_align_);
__m128 p, sum;
__m128* row = (__m128*)(a->matrix_);
__m128* col = (__m128*)(tr_b->matrix_);
#pragma omp parallel for private(sum, p)
for(int i = 0; i < rows; ++i) {
for (int j = 0; j < cols; ++j) {
sum = _mm_setzero_ps();
/* vertical accumulation of 4-wide partial products */
for (int k = 0; k < real_order; ++k) {
p = _mm_mul_ps(row[i * real_order + k], col[j * real_order + k]);
sum = _mm_add_ps(sum, p);
}
/* horizontal sum of the 4 lanes of 'sum' into lane 0 */
p = _mm_movehl_ps(p, sum);
sum = _mm_add_ps(sum, p);
p = _mm_shuffle_ps(sum, sum, 1);
sum = _mm_add_ss(sum, p);
float tmp = 0;
_mm_store_ss(&tmp, sum);
c->matrix_[i * ans_real_order + j] += tmp;
}
}
free_matrix(tr_b);
}
/*
 * Multiply every element (including padding) by 'scalar', 4 floats per op.
 * Improvement: the original _mm_malloc'd a 4-float buffer just to broadcast
 * the scalar; _mm_set1_ps does the same broadcast with no heap traffic.
 */
void mul_matrix_on_scalar(struct matrix* m, float scalar) {
	const __m128 factor = _mm_set1_ps(scalar);
	__m128* row = (__m128*)(m->matrix_);
	const int nvec = (m->rows_ + m->rows_align_) * (m->cols_ + m->cols_align_) / 4;
#pragma omp parallel for
	for(int i = 0; i < nvec; ++i) {
		row[i] = _mm_mul_ps(row[i], factor);
	}
}
/*
 * Sum of squares of all stored floats (padding contributes zeros).
 * Fix: 'p' was read uninitialized by _mm_movehl_ps when the loop body never
 * ran (order == 0), and was formally indeterminate before first assignment;
 * initialize it to zero.
 */
float squared_euclid_norm(struct matrix* m) {
	__m128 sum = _mm_setzero_ps();
	__m128 p = _mm_setzero_ps();
	__m128* vector = (__m128*)(m->matrix_);
	float rez = 0;
	const int nvec = (m->rows_ + m->rows_align_) * (m->cols_ + m->cols_align_) / 4;
	for (int i = 0; i < nvec; ++i) {
		p = _mm_mul_ps(vector[i], vector[i]);
		sum = _mm_add_ps(sum, p);
	}
	/* horizontal sum: lanes (0+2) and (1+3), then lane0 + lane1 */
	p = _mm_movehl_ps(p, sum);
	sum = _mm_add_ps(sum, p);
	p = _mm_shuffle_ps(sum, sum, 1);
	sum = _mm_add_ss(sum, p);
	_mm_store_ss(&rez, sum);
	return rez;
}
himenoBMTxpa.c | /********************************************************************
This benchmark test program is measuring a cpu performance
of floating point operation by a Poisson equation solver.
If you have any question, please ask me via email.
written by Ryutaro HIMENO, November 26, 2001.
Version 3.0
----------------------------------------------
Ryutaro Himeno, Dr. of Eng.
Head of Computer Information Division,
RIKEN (The Institute of Pysical and Chemical Research)
Email : himeno@postman.riken.go.jp
---------------------------------------------------------------
You can adjust the size of this benchmark code to fit your target
computer. In that case, please chose following sets of
[mimax][mjmax][mkmax]:
small : 33,33,65
small : 65,65,129
midium: 129,129,257
large : 257,257,513
ext.large: 513,513,1025
This program is to measure a computer performance in MFLOPS
by using a kernel which appears in a linear solver of pressure
Poisson eq. which appears in an incompressible Navier-Stokes solver.
A point-Jacobi method is employed in this solver as this method can
be easyly vectrized and be parallelized.
------------------
Finite-difference method, curvilinear coodinate system
Vectorizable and parallelizable on each grid point
No. of grid points : imax x jmax x kmax including boundaries
------------------
A,B,C:coefficient matrix, wrk1: source term of Poisson equation
wrk2 : working area, OMEGA : relaxation parameter
BND:control variable for boundaries and objects ( = 0 or 1)
P: pressure
********************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#define MR(mt,n,r,c,d) mt->m[(n) * mt->mrows * mt->mcols * mt->mdeps + (r) * mt->mcols* mt->mdeps + (c) * mt->mdeps + (d)]
struct Mat {
float* m;
int mnums;
int mrows;
int mcols;
int mdeps;
};
/* prototypes */
typedef struct Mat Matrix;
int newMat(Matrix* Mat, int mnums, int mrows, int mcols, int mdeps);
void clearMat(Matrix* Mat);
void set_param(int i[],char *size);
void mat_set(Matrix* Mat,int l,float z);
void mat_set_init(Matrix* Mat);
float jacobi(int n,Matrix* M1,Matrix* M2,Matrix* M3,
Matrix* M4,Matrix* M5,Matrix* M6,Matrix* M7);
double second();
float omega=0.8;
Matrix a,b,c,p,bnd,wrk1,wrk2;
static iteration = 200;
/*
 * Himeno benchmark driver: pick a grid size (argv or prompt), run a 3-pass
 * rehearsal, then 'iteration' Jacobi sweeps, and report MFLOPS.
 * Fixes: xmflops2 was printed uninitialized when cpu == 0; unbounded
 * strcpy/scanf into the 10-byte 'size' buffer could overflow.
 */
int
main(int argc, char *argv[])
{
	int nn;
	int imax, jmax, kmax, mimax, mjmax, mkmax, msize[3];
	float gosa, target;
	double cpu0, cpu1, cpu, xmflops2 = 0.0, score, flop;
	char size[10];

	if (argc == 3) {
		/* bounded copy: grid-size keywords are at most 2 chars anyway */
		strncpy(size, argv[1], sizeof(size) - 1);
		size[sizeof(size) - 1] = '\0';
		iteration = atoi(argv[2]);
	} else {
		printf("For example: \n");
		printf(" Grid-size= XS (32x32x64)\n");
		printf("\t S (64x64x128)\n");
		printf("\t M (128x128x256)\n");
		printf("\t L (256x256x512)\n");
		printf("\t XL (512x512x1024)\n\n");
		printf("Grid-size = ");
		scanf("%9s", size); /* width limit prevents overflow */
		printf("\n");
	}

	set_param(msize, size);
	mimax = msize[0];
	mjmax = msize[1];
	mkmax = msize[2];
	imax = mimax - 1;
	jmax = mjmax - 1;
	kmax = mkmax - 1;
	target = 60.0; /* only used by the (disabled) auto-scaling below */

	printf("mimax = %d mjmax = %d mkmax = %d\n", mimax, mjmax, mkmax);
	printf("imax = %d jmax = %d kmax =%d\n", imax, jmax, kmax);

	/*
	 * Initializing matrixes
	 */
	newMat(&p, 1, mimax, mjmax, mkmax);
	newMat(&bnd, 1, mimax, mjmax, mkmax);
	newMat(&wrk1, 1, mimax, mjmax, mkmax);
	newMat(&wrk2, 1, mimax, mjmax, mkmax);
	newMat(&a, 4, mimax, mjmax, mkmax);
	newMat(&b, 3, mimax, mjmax, mkmax);
	newMat(&c, 3, mimax, mjmax, mkmax);

	mat_set_init(&p);
	mat_set(&bnd, 0, 1.0);
	mat_set(&wrk1, 0, 0.0);
	mat_set(&wrk2, 0, 0.0);
	mat_set(&a, 0, 1.0);
	mat_set(&a, 1, 1.0);
	mat_set(&a, 2, 1.0);
	mat_set(&a, 3, 1.0 / 6.0);
	mat_set(&b, 0, 0.0);
	mat_set(&b, 1, 0.0);
	mat_set(&b, 2, 0.0);
	mat_set(&c, 0, 1.0);
	mat_set(&c, 1, 1.0);
	mat_set(&c, 2, 1.0);

	/*
	 * Start measuring
	 */
	nn = 3;
	printf(" Start rehearsal measurement process.\n");
	printf(" Measure the performance in %d times.\n\n", nn);

	cpu0 = second();
	gosa = jacobi(nn, &a, &b, &c, &p, &bnd, &wrk1, &wrk2);
	cpu1 = second();
	cpu = cpu1 - cpu0;

	/* 34 flops per interior grid point per sweep */
	flop = (double)(kmax - 1) * (double)(jmax - 1) * (double)(imax - 1) * 34.0;
	if (cpu != 0.0)
		xmflops2 = flop / cpu * 1.e-6 * nn;

	printf(" MFLOPS: %f time(s): %f %e\n\n", xmflops2, cpu, gosa);

	// nn= (int)(target/(cpu/3.0));
	nn = iteration;

	printf(" Now, start the actual measurement process.\n");
	printf(" The loop will be excuted in %d times\n", nn);
	printf(" This will take about one minute.\n");
	printf(" Wait for a while\n\n");

	cpu0 = second();
	gosa = jacobi(nn, &a, &b, &c, &p, &bnd, &wrk1, &wrk2);
	cpu1 = second();
	cpu = cpu1 - cpu0;

	if (cpu != 0.0)
		xmflops2 = (double)flop / cpu * 1.0e-6 * nn;

	printf("cpu : %f sec.\n", cpu);
	printf("Loop executed for %d times\n", nn);
	printf("Gosa : %e \n", gosa);
	printf("MFLOPS measured : %f\n", xmflops2);
	score = xmflops2 / 82.84;
	printf("Score based on Pentium III 600MHz using Fortran 77: %f\n", score);

	/*
	 * Matrix free
	 */
	clearMat(&p);
	clearMat(&bnd);
	clearMat(&wrk1);
	clearMat(&wrk2);
	clearMat(&a);
	clearMat(&b);
	clearMat(&c);
	return (0);
}
/*
 * Translate a grid-size keyword (XS/S/M/L/XL, either case) into
 * {mimax, mjmax, mkmax} in is[0..2]. Unknown keywords leave is[] untouched.
 * Fix: the original compared against "Xl", so the documented "XL" spelling
 * (the one main() prints in its usage text) was silently rejected.
 */
void
set_param(int is[], char *size)
{
	if (!strcmp(size, "XS") || !strcmp(size, "xs")) {
		is[0] = 32;
		is[1] = 32;
		is[2] = 64;
		return;
	}
	if (!strcmp(size, "S") || !strcmp(size, "s")) {
		is[0] = 64;
		is[1] = 64;
		is[2] = 128;
		return;
	}
	if (!strcmp(size, "M") || !strcmp(size, "m")) {
		is[0] = 128;
		is[1] = 128;
		is[2] = 256;
		return;
	}
	if (!strcmp(size, "L") || !strcmp(size, "l")) {
		is[0] = 256;
		is[1] = 256;
		is[2] = 512;
		return;
	}
	/* accept "Xl" too for backward compatibility with the old check */
	if (!strcmp(size, "XL") || !strcmp(size, "xl") || !strcmp(size, "Xl")) {
		is[0] = 512;
		is[1] = 512;
		is[2] = 1024;
		return;
	}
}
/*
 * Allocate storage for an mnums x mrows x mcols x mdeps matrix.
 * Returns 1 on success, 0 if the allocation failed.
 * Fix: the element-count product was evaluated in int before widening,
 * which can overflow for large grids; compute it in size_t.
 */
int
newMat(Matrix* Mat, int mnums, int mrows, int mcols, int mdeps)
{
	size_t total = (size_t)mnums * (size_t)mrows * (size_t)mcols * (size_t)mdeps;

	Mat->mnums = mnums;
	Mat->mrows = mrows;
	Mat->mcols = mcols;
	Mat->mdeps = mdeps;
	Mat->m = (float*) malloc(total * sizeof(float));
	return (Mat->m != NULL) ? 1 : 0;
}
/* Release a matrix's storage and reset its descriptor fields to zero. */
void
clearMat(Matrix* Mat)
{
	free(Mat->m); /* free(NULL) is a harmless no-op */
	Mat->m = NULL;
	Mat->mnums = 0;
	Mat->mrows = 0;
	Mat->mcols = 0;
	Mat->mdeps = 0;
}
/* Fill plane 'l' of Mat with the constant 'val'. */
void
mat_set(Matrix* Mat, int l, float val)
{
	int r, c, d;

	for (r = 0; r < Mat->mrows; r++)
		for (c = 0; c < Mat->mcols; c++)
			for (d = 0; d < Mat->mdeps; d++)
				MR(Mat, l, r, c, d) = val;
}
/*
 * Initialize plane 0 with the benchmark's boundary profile:
 * value = i^2 / (mrows-1)^2, constant over j and k.
 * Fix: removed the unused locals 'l' and 'tt' from the original.
 */
void
mat_set_init(Matrix* Mat)
{
	int i, j, k;

	for (i = 0; i < Mat->mrows; i++)
		for (j = 0; j < Mat->mcols; j++)
			for (k = 0; k < Mat->mdeps; k++)
				MR(Mat, 0, i, j, k) = (float)(i * i)
					/ (float)((Mat->mrows - 1) * (Mat->mrows - 1));
}
/*
 * nn point-Jacobi sweeps of the pressure Poisson stencil.
 * Returns the final residual 'gosa' (sum of squared updates).
 * Thread choreography per sweep: barrier -> master zeroes gosa ->
 * each thread accumulates its private gosa1 over its slice ->
 * barrier (also guarantees master's reset happened before any add) ->
 * copy wrk2 back to p -> critical accumulate gosa1 into gosa.
 */
float
jacobi(int nn, Matrix* a,Matrix* b,Matrix* c,
Matrix* p,Matrix* bnd,Matrix* wrk1,Matrix* wrk2)
{
int i,j,k,n,imax,jmax,kmax;
float gosa,gosa1,s0,ss;

imax= p->mrows-1;
jmax= p->mcols-1;
kmax= p->mdeps-1;

#pragma omp parallel shared(a,p,b,c,bnd,wrk1,wrk2,nn,imax,jmax,kmax,omega,gosa) private(i,j,k,s0,ss,gosa1,n)
{
for(n=0 ; n<nn ; n++){
/* make sure last sweep's critical sections finished before resetting */
#pragma omp barrier
#pragma omp master
{
gosa = 0.0;
}

gosa1= 0.0;

/* 19-point stencil update into wrk2; nowait is safe because the
   barrier below separates it from the copy-back */
#pragma omp for nowait
for(i=1 ; i<imax; i++)
for(j=1 ; j<jmax ; j++)
for(k=1 ; k<kmax ; k++){
s0= MR(a,0,i,j,k)*MR(p,0,i+1,j, k)
+ MR(a,1,i,j,k)*MR(p,0,i, j+1,k)
+ MR(a,2,i,j,k)*MR(p,0,i, j, k+1)
+ MR(b,0,i,j,k)
*( MR(p,0,i+1,j+1,k) - MR(p,0,i+1,j-1,k)
- MR(p,0,i-1,j+1,k) + MR(p,0,i-1,j-1,k) )
+ MR(b,1,i,j,k)
*( MR(p,0,i,j+1,k+1) - MR(p,0,i,j-1,k+1)
- MR(p,0,i,j+1,k-1) + MR(p,0,i,j-1,k-1) )
+ MR(b,2,i,j,k)
*( MR(p,0,i+1,j,k+1) - MR(p,0,i-1,j,k+1)
- MR(p,0,i+1,j,k-1) + MR(p,0,i-1,j,k-1) )
+ MR(c,0,i,j,k) * MR(p,0,i-1,j, k)
+ MR(c,1,i,j,k) * MR(p,0,i, j-1,k)
+ MR(c,2,i,j,k) * MR(p,0,i, j, k-1)
+ MR(wrk1,0,i,j,k);

ss= (s0*MR(a,3,i,j,k) - MR(p,0,i,j,k))*MR(bnd,0,i,j,k);

gosa1+= ss*ss;
MR(wrk2,0,i,j,k)= MR(p,0,i,j,k) + omega*ss;
}

#pragma omp barrier
#pragma omp for nowait
for(i=1 ; i<imax ; i++)
for(j=1 ; j<jmax ; j++)
for(k=1 ; k<kmax ; k++)
MR(p,0,i,j,k)= MR(wrk2,0,i,j,k);

/* fold this thread's partial residual into the shared total */
#pragma omp critical
{
gosa+= gosa1;
}

} /* end n loop */
}

return(gosa);
}
/*
 * Wall-clock seconds elapsed since the first call.
 * The first invocation latches the epoch in function statics and returns 0.
 */
double
second()
{
	static int base_sec = 0, base_usec = 0;
	struct timeval tv;

	gettimeofday(&tv, NULL);
	if (base_sec == 0 && base_usec == 0) {
		base_sec = tv.tv_sec;
		base_usec = tv.tv_usec;
		return 0.0;
	}
	return (double)(tv.tv_sec - base_sec)
		+ ((double)(tv.tv_usec - base_usec)) / 1.0e6;
}
|
GB_binop__isgt_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isgt_int8)
// A.*B function (eWiseMult): GB (_AemultB_01__isgt_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__isgt_int8)
// A.*B function (eWiseMult): GB (_AemultB_03__isgt_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isgt_int8)
// A*D function (colscale): GB (_AxD__isgt_int8)
// D*A function (rowscale): GB (_DxB__isgt_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__isgt_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__isgt_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isgt_int8)
// C=scalar+B GB (_bind1st__isgt_int8)
// C=scalar+B' GB (_bind1st_tran__isgt_int8)
// C=A+scalar GB (_bind2nd__isgt_int8)
// C=A'+scalar GB (_bind2nd_tran__isgt_int8)
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = (aij > bij)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x > y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISGT || GxB_NO_INT8 || GxB_NO_ISGT_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
/*
 * C = A isgt B where C, A, B are all dense; body in the shared template.
 * Auto-generated kernel — fix issues in the Generator template, not here.
 */
GrB_Info GB (_Cdense_ewise3_noaccum__isgt_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
/*
 * C += B: accumulate a sparse B into a dense C with the isgt operator.
 * Auto-generated; B_ek_slicing partitions B's entries across B_ntasks.
 */
GrB_Info GB (_Cdense_accumB__isgt_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
/*
 * C += b: accumulate a scalar into a dense C with the isgt operator.
 * NOTE(review): the second "return (GrB_SUCCESS)" is unreachable (the
 * inner block already returns) — a generator artifact; fix belongs in
 * Generator/, not in this auto-generated file.
 */
GrB_Info GB (_Cdense_accumb__isgt_int8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

// Scales each column j of A by D(j,j) under the ISGT_INT8 operator; the
// traversal is in GB_AxB_colscale_template.c, writing into Cx.
GrB_Info GB (_AxD__isgt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

// Scales each row i of B by D(i,i) under the ISGT_INT8 operator; the
// traversal is in GB_AxB_rowscale_template.c, writing into Cx.
GrB_Info GB (_DxB__isgt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// eWiseAdd over the union of the patterns of A and B, optionally under a
// mask M (structural and/or complemented).  The per-task workspaces declared
// below are allocated/used inside GB_add_template.c and released by
// GB_FREE_WORK before returning.
GrB_Info GB (_AaddB__isgt_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

// eWiseMult over the intersection of the patterns of A and B (method 01);
// all work is delegated to GB_emult_01_meta.c.
GrB_Info GB (_AemultB_01__isgt_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

// Method 02: A drives the iteration.  When GB_BINOP_FLIP is defined the
// operator has no pre-flipped variant, so flipxy selects between the
// GB_FLIPPED=1 and GB_FLIPPED=0 instantiations of the same template.
GrB_Info GB (_AemultB_02__isgt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

// Method 03: the sparse mask M drives the iteration; the loop body is
// supplied by GB_emult_03_template.c using the mask slicing from the caller.
GrB_Info GB (_AemultB_03__isgt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

// Bitmap-output eWiseMult; GB_bitmap_emult_template.c dispatches on
// ewise_method and the mask options.
GrB_Info GB (_AemultB_bitmap__isgt_int8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Computes Cx [p] = (x > Bx [p]) for every position p, with the scalar x
// bound to the first operand.  Positions absent from the bitmap Bb (if B is
// bitmapped) are skipped.
GrB_Info GB (_bind1st__isgt_int8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;    // skip entries not present in B
        int8_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x > bij) ;            // ISGT: 1 if x > bij, else 0
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Computes Cx [p] = (Ax [p] > y) for every position p, with the scalar y
// bound to the second operand.  Positions absent from the bitmap Ab (if A is
// bitmapped) are skipped.
GrB_Info GB (_bind2nd__isgt_int8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;    // skip entries not present in A
        int8_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij > y) ;            // ISGT: 1 if aij > y, else 0
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    int8_t aij = GBX (Ax, pA, false) ;  \
    Cx [pC] = (x > aij) ;               \
}

// Transposes A while applying z = (x > aij) with x bound first; the
// transpose machinery comes from GB_unop_transpose.c.
GrB_Info GB (_bind1st_tran__isgt_int8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows (preprocessor-only; this
    // re-definition is processed even though it sits after the returns)
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    int8_t aij = GBX (Ax, pA, false) ;  \
    Cx [pC] = (aij > y) ;               \
}

// Transposes A while applying z = (aij > y) with y bound second; the
// transpose machinery comes from GB_unop_transpose.c.
GrB_Info GB (_bind2nd_tran__isgt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
convolution_3x3_pack1to4_fp16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// conv3x3s1: 3x3 convolution, stride 1, fp16 storage and fp16 arithmetic.
// Input is pack1 (one scalar __fp16 per channel element) and output is pack4
// (4 output channels interleaved per pixel) -- hence "pack1to4".  The width
// loop is tiled 8/4/2/1 with hand-written A64 NEON assembly; the nine kernel
// taps are passed into the asm blocks as vector operands %8..%16.
static void conv3x3s1_pack1to4_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const __fp16* bias = _bias;

    // one pack4 output channel per OpenMP task
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out0 = top_blob.channel(p);

        // seed the whole output channel with the bias (zero when none given)
        float16x4_t _bias0 = bias ? vld1_f16(bias + p * 4) : vdup_n_f16((__fp16)0.f);
        out0.fill(_bias0);

        const __fp16* k0 = kernel.channel(p);

        int q = 0;
        // accumulate the contribution of each input channel into out0
        for (; q < inch; q++)
        {
            __fp16* outptr0 = out0;

            const Mat img0 = bottom_blob.channel(q);

            const __fp16* r0 = img0.row<const __fp16>(0);
            const __fp16* r1 = img0.row<const __fp16>(1);
            const __fp16* r2 = img0.row<const __fp16>(2);

            // nine 3x3 taps; each float16x4 holds the tap for 4 output lanes
            float16x4_t _k00 = vld1_f16(k0);
            float16x4_t _k01 = vld1_f16(k0 + 4);
            float16x4_t _k02 = vld1_f16(k0 + 8);
            float16x4_t _k10 = vld1_f16(k0 + 12);
            float16x4_t _k11 = vld1_f16(k0 + 16);
            float16x4_t _k12 = vld1_f16(k0 + 20);
            float16x4_t _k20 = vld1_f16(k0 + 24);
            float16x4_t _k21 = vld1_f16(k0 + 28);
            float16x4_t _k22 = vld1_f16(k0 + 32);

            int i = 0;
            for (; i < outh; i++)
            {
                int j = 0;
                // 8 output pixels per iteration: v24..v31 hold the 8 sums
                for (; j + 7 < outw; j += 8)
                {
                    asm volatile(
                        "prfm pldl1keep, [%0, #256] \n"
                        "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%0], #32 \n" // sum0 sum1 sum2 sum3
                        "prfm pldl1keep, [%0, #256] \n"
                        "ld1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%0] \n" // sum4 sum5 sum6 sum7
                        "sub %0, %0, #32 \n"
                        "prfm pldl1keep, [%1, #128] \n"
                        "ld1 {v0.8h}, [%1], #16 \n" // r0
                        "ld1 {v1.4h}, [%1] \n"
                        "fmla v24.4h, %8.4h, v0.h[0] \n"
                        "fmla v25.4h, %8.4h, v0.h[1] \n"
                        "fmla v26.4h, %8.4h, v0.h[2] \n"
                        "fmla v27.4h, %8.4h, v0.h[3] \n"
                        "fmla v28.4h, %8.4h, v0.h[4] \n"
                        "fmla v29.4h, %8.4h, v0.h[5] \n"
                        "fmla v30.4h, %8.4h, v0.h[6] \n"
                        "fmla v31.4h, %8.4h, v0.h[7] \n"
                        "fmla v24.4h, %9.4h, v0.h[1] \n"
                        "fmla v25.4h, %9.4h, v0.h[2] \n"
                        "fmla v26.4h, %9.4h, v0.h[3] \n"
                        "fmla v27.4h, %9.4h, v0.h[4] \n"
                        "fmla v28.4h, %9.4h, v0.h[5] \n"
                        "fmla v29.4h, %9.4h, v0.h[6] \n"
                        "fmla v30.4h, %9.4h, v0.h[7] \n"
                        "fmla v31.4h, %9.4h, v1.h[0] \n"
                        "fmla v24.4h, %10.4h, v0.h[2] \n"
                        "fmla v25.4h, %10.4h, v0.h[3] \n"
                        "fmla v26.4h, %10.4h, v0.h[4] \n"
                        "fmla v27.4h, %10.4h, v0.h[5] \n"
                        "fmla v28.4h, %10.4h, v0.h[6] \n"
                        "fmla v29.4h, %10.4h, v0.h[7] \n"
                        "fmla v30.4h, %10.4h, v1.h[0] \n"
                        "fmla v31.4h, %10.4h, v1.h[1] \n"
                        "prfm pldl1keep, [%2, #128] \n"
                        "ld1 {v2.8h}, [%2], #16 \n" // r1
                        "ld1 {v3.4h}, [%2] \n"
                        "fmla v24.4h, %11.4h, v2.h[0] \n"
                        "fmla v25.4h, %11.4h, v2.h[1] \n"
                        "fmla v26.4h, %11.4h, v2.h[2] \n"
                        "fmla v27.4h, %11.4h, v2.h[3] \n"
                        "fmla v28.4h, %11.4h, v2.h[4] \n"
                        "fmla v29.4h, %11.4h, v2.h[5] \n"
                        "fmla v30.4h, %11.4h, v2.h[6] \n"
                        "fmla v31.4h, %11.4h, v2.h[7] \n"
                        "fmla v24.4h, %12.4h, v2.h[1] \n"
                        "fmla v25.4h, %12.4h, v2.h[2] \n"
                        "fmla v26.4h, %12.4h, v2.h[3] \n"
                        "fmla v27.4h, %12.4h, v2.h[4] \n"
                        "fmla v28.4h, %12.4h, v2.h[5] \n"
                        "fmla v29.4h, %12.4h, v2.h[6] \n"
                        "fmla v30.4h, %12.4h, v2.h[7] \n"
                        "fmla v31.4h, %12.4h, v3.h[0] \n"
                        "fmla v24.4h, %13.4h, v2.h[2] \n"
                        "fmla v25.4h, %13.4h, v2.h[3] \n"
                        "fmla v26.4h, %13.4h, v2.h[4] \n"
                        "fmla v27.4h, %13.4h, v2.h[5] \n"
                        "fmla v28.4h, %13.4h, v2.h[6] \n"
                        "fmla v29.4h, %13.4h, v2.h[7] \n"
                        "fmla v30.4h, %13.4h, v3.h[0] \n"
                        "fmla v31.4h, %13.4h, v3.h[1] \n"
                        "prfm pldl1keep, [%3, #128] \n"
                        "ld1 {v4.8h}, [%3], #16 \n" // r2
                        "ld1 {v5.4h}, [%3] \n"
                        "fmla v24.4h, %14.4h, v4.h[0] \n"
                        "fmla v25.4h, %14.4h, v4.h[1] \n"
                        "fmla v26.4h, %14.4h, v4.h[2] \n"
                        "fmla v27.4h, %14.4h, v4.h[3] \n"
                        "fmla v28.4h, %14.4h, v4.h[4] \n"
                        "fmla v29.4h, %14.4h, v4.h[5] \n"
                        "fmla v30.4h, %14.4h, v4.h[6] \n"
                        "fmla v31.4h, %14.4h, v4.h[7] \n"
                        "fmla v24.4h, %15.4h, v4.h[1] \n"
                        "fmla v25.4h, %15.4h, v4.h[2] \n"
                        "fmla v26.4h, %15.4h, v4.h[3] \n"
                        "fmla v27.4h, %15.4h, v4.h[4] \n"
                        "fmla v28.4h, %15.4h, v4.h[5] \n"
                        "fmla v29.4h, %15.4h, v4.h[6] \n"
                        "fmla v30.4h, %15.4h, v4.h[7] \n"
                        "fmla v31.4h, %15.4h, v5.h[0] \n"
                        "fmla v24.4h, %16.4h, v4.h[2] \n"
                        "fmla v25.4h, %16.4h, v4.h[3] \n"
                        "fmla v26.4h, %16.4h, v4.h[4] \n"
                        "fmla v27.4h, %16.4h, v4.h[5] \n"
                        "fmla v28.4h, %16.4h, v4.h[6] \n"
                        "fmla v29.4h, %16.4h, v4.h[7] \n"
                        "fmla v30.4h, %16.4h, v5.h[0] \n"
                        "fmla v31.4h, %16.4h, v5.h[1] \n"
                        "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%0], #32 \n"
                        "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%0], #32 \n"
                        : "=r"(outptr0), // %0
                        "=r"(r0),      // %1
                        "=r"(r1),      // %2
                        "=r"(r2)       // %3
                        : "0"(outptr0),
                        "1"(r0),
                        "2"(r1),
                        "3"(r2),
                        "w"(_k00), // %8
                        "w"(_k01), // %9
                        "w"(_k02), // %10
                        "w"(_k10), // %11
                        "w"(_k11), // %12
                        "w"(_k12), // %13
                        "w"(_k20), // %14
                        "w"(_k21), // %15
                        "w"(_k22) // %16
                        : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
                }
                // 4 output pixels per iteration: sums in v28..v31
                for (; j + 3 < outw; j += 4)
                {
                    asm volatile(
                        "prfm pldl1keep, [%0, #256] \n"
                        "ld1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%0] \n" // sum0 sum1 sum2 sum3
                        "prfm pldl1keep, [%1, #128] \n"
                        "ld1 {v0.8h}, [%1] \n" // r0
                        "fmla v28.4h, %8.4h, v0.h[0] \n"
                        "fmla v29.4h, %8.4h, v0.h[1] \n"
                        "fmla v30.4h, %8.4h, v0.h[2] \n"
                        "fmla v31.4h, %8.4h, v0.h[3] \n"
                        "fmla v28.4h, %9.4h, v0.h[1] \n"
                        "fmla v29.4h, %9.4h, v0.h[2] \n"
                        "fmla v30.4h, %9.4h, v0.h[3] \n"
                        "fmla v31.4h, %9.4h, v0.h[4] \n"
                        "fmla v28.4h, %10.4h, v0.h[2] \n"
                        "fmla v29.4h, %10.4h, v0.h[3] \n"
                        "fmla v30.4h, %10.4h, v0.h[4] \n"
                        "fmla v31.4h, %10.4h, v0.h[5] \n"
                        "prfm pldl1keep, [%2, #128] \n"
                        "ld1 {v1.8h}, [%2] \n" // r1
                        "fmla v28.4h, %11.4h, v1.h[0] \n"
                        "fmla v29.4h, %11.4h, v1.h[1] \n"
                        "fmla v30.4h, %11.4h, v1.h[2] \n"
                        "fmla v31.4h, %11.4h, v1.h[3] \n"
                        "fmla v28.4h, %12.4h, v1.h[1] \n"
                        "fmla v29.4h, %12.4h, v1.h[2] \n"
                        "fmla v30.4h, %12.4h, v1.h[3] \n"
                        "fmla v31.4h, %12.4h, v1.h[4] \n"
                        "fmla v28.4h, %13.4h, v1.h[2] \n"
                        "fmla v29.4h, %13.4h, v1.h[3] \n"
                        "fmla v30.4h, %13.4h, v1.h[4] \n"
                        "fmla v31.4h, %13.4h, v1.h[5] \n"
                        "prfm pldl1keep, [%3, #128] \n"
                        "ld1 {v2.8h}, [%3] \n" // r2
                        "fmla v28.4h, %14.4h, v2.h[0] \n"
                        "fmla v29.4h, %14.4h, v2.h[1] \n"
                        "fmla v30.4h, %14.4h, v2.h[2] \n"
                        "fmla v31.4h, %14.4h, v2.h[3] \n"
                        "fmla v28.4h, %15.4h, v2.h[1] \n"
                        "fmla v29.4h, %15.4h, v2.h[2] \n"
                        "fmla v30.4h, %15.4h, v2.h[3] \n"
                        "fmla v31.4h, %15.4h, v2.h[4] \n"
                        "fmla v28.4h, %16.4h, v2.h[2] \n"
                        "fmla v29.4h, %16.4h, v2.h[3] \n"
                        "fmla v30.4h, %16.4h, v2.h[4] \n"
                        "fmla v31.4h, %16.4h, v2.h[5] \n"
                        "add %1, %1, #8 \n"
                        "add %2, %2, #8 \n"
                        "add %3, %3, #8 \n"
                        "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%0], #32 \n"
                        : "=r"(outptr0), // %0
                        "=r"(r0),      // %1
                        "=r"(r1),      // %2
                        "=r"(r2)       // %3
                        : "0"(outptr0),
                        "1"(r0),
                        "2"(r1),
                        "3"(r2),
                        "w"(_k00), // %8
                        "w"(_k01), // %9
                        "w"(_k02), // %10
                        "w"(_k10), // %11
                        "w"(_k11), // %12
                        "w"(_k12), // %13
                        "w"(_k20), // %14
                        "w"(_k21), // %15
                        "w"(_k22) // %16
                        : "cc", "memory", "v0", "v1", "v2", "v28", "v29", "v30", "v31");
                }
                // 2 output pixels per iteration: sums in v30..v31
                for (; j + 1 < outw; j += 2)
                {
                    asm volatile(
                        "prfm pldl1keep, [%0, #128] \n"
                        "ld1 {v30.4h, v31.4h}, [%0] \n" // sum0 sum1
                        "prfm pldl1keep, [%1, #64] \n"
                        "ld1 {v0.4h}, [%1] \n" // r0
                        "fmla v30.4h, %8.4h, v0.h[0] \n"
                        "fmla v31.4h, %8.4h, v0.h[1] \n"
                        "fmla v30.4h, %9.4h, v0.h[1] \n"
                        "fmla v31.4h, %9.4h, v0.h[2] \n"
                        "fmla v30.4h, %10.4h, v0.h[2] \n"
                        "fmla v31.4h, %10.4h, v0.h[3] \n"
                        "prfm pldl1keep, [%2, #64] \n"
                        "ld1 {v1.4h}, [%2] \n" // r1
                        "fmla v30.4h, %11.4h, v1.h[0] \n"
                        "fmla v31.4h, %11.4h, v1.h[1] \n"
                        "fmla v30.4h, %12.4h, v1.h[1] \n"
                        "fmla v31.4h, %12.4h, v1.h[2] \n"
                        "fmla v30.4h, %13.4h, v1.h[2] \n"
                        "fmla v31.4h, %13.4h, v1.h[3] \n"
                        "prfm pldl1keep, [%3, #64] \n"
                        "ld1 {v2.4h}, [%3] \n" // r2
                        "fmla v30.4h, %14.4h, v2.h[0] \n"
                        "fmla v31.4h, %14.4h, v2.h[1] \n"
                        "fmla v30.4h, %15.4h, v2.h[1] \n"
                        "fmla v31.4h, %15.4h, v2.h[2] \n"
                        "fmla v30.4h, %16.4h, v2.h[2] \n"
                        "fmla v31.4h, %16.4h, v2.h[3] \n"
                        "add %1, %1, #4 \n"
                        "add %2, %2, #4 \n"
                        "add %3, %3, #4 \n"
                        "st1 {v30.4h, v31.4h}, [%0], #16 \n"
                        : "=r"(outptr0), // %0
                        "=r"(r0),      // %1
                        "=r"(r1),      // %2
                        "=r"(r2)       // %3
                        : "0"(outptr0),
                        "1"(r0),
                        "2"(r1),
                        "3"(r2),
                        "w"(_k00), // %8
                        "w"(_k01), // %9
                        "w"(_k02), // %10
                        "w"(_k10), // %11
                        "w"(_k11), // %12
                        "w"(_k12), // %13
                        "w"(_k20), // %14
                        "w"(_k21), // %15
                        "w"(_k22) // %16
                        : "cc", "memory", "v0", "v1", "v2", "v30", "v31");
                }
                // remaining single pixels
                for (; j < outw; j++)
                {
                    asm volatile(
                        "prfm pldl1keep, [%0, #64] \n"
                        "ld1 {v30.4h}, [%0] \n" // sum0
                        "prfm pldl1keep, [%1, #64] \n"
                        "ld1 {v0.4h}, [%1] \n" // r0
                        "fmla v30.4h, %8.4h, v0.h[0] \n"
                        "fmla v30.4h, %9.4h, v0.h[1] \n"
                        "fmla v30.4h, %10.4h, v0.h[2] \n"
                        "prfm pldl1keep, [%2, #64] \n"
                        "ld1 {v1.4h}, [%2] \n" // r1
                        "fmla v30.4h, %11.4h, v1.h[0] \n"
                        "fmla v30.4h, %12.4h, v1.h[1] \n"
                        "fmla v30.4h, %13.4h, v1.h[2] \n"
                        "prfm pldl1keep, [%3, #64] \n"
                        "ld1 {v2.4h}, [%3] \n" // r2
                        "fmla v30.4h, %14.4h, v2.h[0] \n"
                        "fmla v30.4h, %15.4h, v2.h[1] \n"
                        "fmla v30.4h, %16.4h, v2.h[2] \n"
                        "add %1, %1, #2 \n"
                        "add %2, %2, #2 \n"
                        "add %3, %3, #2 \n"
                        "st1 {v30.4h}, [%0], #8 \n"
                        : "=r"(outptr0), // %0
                        "=r"(r0),      // %1
                        "=r"(r1),      // %2
                        "=r"(r2)       // %3
                        : "0"(outptr0),
                        "1"(r0),
                        "2"(r1),
                        "3"(r2),
                        "w"(_k00), // %8
                        "w"(_k01), // %9
                        "w"(_k02), // %10
                        "w"(_k10), // %11
                        "w"(_k11), // %12
                        "w"(_k12), // %13
                        "w"(_k20), // %14
                        "w"(_k21), // %15
                        "w"(_k22) // %16
                        : "cc", "memory", "v0", "v1", "v2", "v30");
                }

                // skip the 2-pixel kernel apron at the end of each input row
                // (assumes input width == outw + 2, i.e. "valid" padding --
                // NOTE(review): confirm against the caller's padding setup)
                r0 += 2;
                r1 += 2;
                r2 += 2;
            }

            // advance to this output channel's next 3x3 kernel (9 taps x 4 lanes)
            k0 += 9 * 4;
        }
    }
}
// conv3x3s2: 3x3 convolution, stride 2, fp16 storage and fp16 arithmetic,
// pack1 input -> pack4 output.  Same register layout as the stride-1 kernel
// above (taps in asm operands %8..%16), but each output pixel consumes two
// input columns, so the width loop is tiled 4/2/1.
static void conv3x3s2_pack1to4_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // after a row, pointers have advanced 2*outw; skip the rest of that input
    // row plus one whole row (stride 2 consumes two input rows per output row)
    const int tailstep = w - 2 * outw + w;

    const __fp16* bias = _bias;

    // one pack4 output channel per OpenMP task
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out0 = top_blob.channel(p);

        // seed the whole output channel with the bias (zero when none given)
        float16x4_t _bias0 = bias ? vld1_f16(bias + p * 4) : vdup_n_f16((__fp16)0.f);
        out0.fill(_bias0);

        const __fp16* k0 = kernel.channel(p);

        int q = 0;
        // accumulate the contribution of each input channel into out0
        for (; q < inch; q++)
        {
            __fp16* outptr0 = out0;

            const Mat img0 = bottom_blob.channel(q);

            const __fp16* r0 = img0.row<const __fp16>(0);
            const __fp16* r1 = img0.row<const __fp16>(1);
            const __fp16* r2 = img0.row<const __fp16>(2);

            // nine 3x3 taps; each float16x4 holds the tap for 4 output lanes
            float16x4_t _k00 = vld1_f16(k0);
            float16x4_t _k01 = vld1_f16(k0 + 4);
            float16x4_t _k02 = vld1_f16(k0 + 8);
            float16x4_t _k10 = vld1_f16(k0 + 12);
            float16x4_t _k11 = vld1_f16(k0 + 16);
            float16x4_t _k12 = vld1_f16(k0 + 20);
            float16x4_t _k20 = vld1_f16(k0 + 24);
            float16x4_t _k21 = vld1_f16(k0 + 28);
            float16x4_t _k22 = vld1_f16(k0 + 32);

            int i = 0;
            for (; i < outh; i++)
            {
                int j = 0;
                // 4 output pixels per iteration (even input lanes feed the
                // stride-2 taps); sums in v28..v31
                for (; j + 3 < outw; j += 4)
                {
                    asm volatile(
                        "prfm pldl1keep, [%0, #256] \n"
                        "ld1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%0] \n" // sum0 sum1 sum2 sum3
                        "prfm pldl1keep, [%1, #128] \n"
                        "ld1 {v0.8h}, [%1], #16 \n" // r0
                        "ld1 {v1.h}[0], [%1] \n"
                        "fmla v28.4h, %8.4h, v0.h[0] \n"
                        "fmla v29.4h, %8.4h, v0.h[2] \n"
                        "fmla v30.4h, %8.4h, v0.h[4] \n"
                        "fmla v31.4h, %8.4h, v0.h[6] \n"
                        "fmla v28.4h, %9.4h, v0.h[1] \n"
                        "fmla v29.4h, %9.4h, v0.h[3] \n"
                        "fmla v30.4h, %9.4h, v0.h[5] \n"
                        "fmla v31.4h, %9.4h, v0.h[7] \n"
                        "fmla v28.4h, %10.4h, v0.h[2] \n"
                        "fmla v29.4h, %10.4h, v0.h[4] \n"
                        "fmla v30.4h, %10.4h, v0.h[6] \n"
                        "fmla v31.4h, %10.4h, v1.h[0] \n"
                        "prfm pldl1keep, [%2, #128] \n"
                        "ld1 {v2.8h}, [%2], #16 \n" // r1
                        "ld1 {v3.h}[0], [%2] \n"
                        "fmla v28.4h, %11.4h, v2.h[0] \n"
                        "fmla v29.4h, %11.4h, v2.h[2] \n"
                        "fmla v30.4h, %11.4h, v2.h[4] \n"
                        "fmla v31.4h, %11.4h, v2.h[6] \n"
                        "fmla v28.4h, %12.4h, v2.h[1] \n"
                        "fmla v29.4h, %12.4h, v2.h[3] \n"
                        "fmla v30.4h, %12.4h, v2.h[5] \n"
                        "fmla v31.4h, %12.4h, v2.h[7] \n"
                        "fmla v28.4h, %13.4h, v2.h[2] \n"
                        "fmla v29.4h, %13.4h, v2.h[4] \n"
                        "fmla v30.4h, %13.4h, v2.h[6] \n"
                        "fmla v31.4h, %13.4h, v3.h[0] \n"
                        "prfm pldl1keep, [%3, #128] \n"
                        "ld1 {v4.8h}, [%3], #16 \n" // r2
                        "ld1 {v5.h}[0], [%3] \n"
                        "fmla v28.4h, %14.4h, v4.h[0] \n"
                        "fmla v29.4h, %14.4h, v4.h[2] \n"
                        "fmla v30.4h, %14.4h, v4.h[4] \n"
                        "fmla v31.4h, %14.4h, v4.h[6] \n"
                        "fmla v28.4h, %15.4h, v4.h[1] \n"
                        "fmla v29.4h, %15.4h, v4.h[3] \n"
                        "fmla v30.4h, %15.4h, v4.h[5] \n"
                        "fmla v31.4h, %15.4h, v4.h[7] \n"
                        "fmla v28.4h, %16.4h, v4.h[2] \n"
                        "fmla v29.4h, %16.4h, v4.h[4] \n"
                        "fmla v30.4h, %16.4h, v4.h[6] \n"
                        "fmla v31.4h, %16.4h, v5.h[0] \n"
                        "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%0], #32 \n"
                        : "=r"(outptr0), // %0
                        "=r"(r0),      // %1
                        "=r"(r1),      // %2
                        "=r"(r2)       // %3
                        : "0"(outptr0),
                        "1"(r0),
                        "2"(r1),
                        "3"(r2),
                        "w"(_k00), // %8
                        "w"(_k01), // %9
                        "w"(_k02), // %10
                        "w"(_k10), // %11
                        "w"(_k11), // %12
                        "w"(_k12), // %13
                        "w"(_k20), // %14
                        "w"(_k21), // %15
                        "w"(_k22) // %16
                        : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v28", "v29", "v30", "v31");
                }
                // 2 output pixels per iteration: sums in v30..v31
                for (; j + 1 < outw; j += 2)
                {
                    asm volatile(
                        "prfm pldl1keep, [%0, #128] \n"
                        "ld1 {v30.4h, v31.4h}, [%0] \n" // sum0 sum1
                        "prfm pldl1keep, [%1, #64] \n"
                        "ld1 {v0.4h}, [%1], #8 \n" // r0
                        "ld1 {v1.h}[0], [%1] \n"
                        "fmla v30.4h, %8.4h, v0.h[0] \n"
                        "fmla v31.4h, %8.4h, v0.h[2] \n"
                        "fmla v30.4h, %9.4h, v0.h[1] \n"
                        "fmla v31.4h, %9.4h, v0.h[3] \n"
                        "fmla v30.4h, %10.4h, v0.h[2] \n"
                        "fmla v31.4h, %10.4h, v1.h[0] \n"
                        "prfm pldl1keep, [%2, #64] \n"
                        "ld1 {v2.4h}, [%2], #8 \n" // r1
                        "ld1 {v3.h}[0], [%2] \n"
                        "fmla v30.4h, %11.4h, v2.h[0] \n"
                        "fmla v31.4h, %11.4h, v2.h[2] \n"
                        "fmla v30.4h, %12.4h, v2.h[1] \n"
                        "fmla v31.4h, %12.4h, v2.h[3] \n"
                        "fmla v30.4h, %13.4h, v2.h[2] \n"
                        "fmla v31.4h, %13.4h, v3.h[0] \n"
                        "prfm pldl1keep, [%3, #64] \n"
                        "ld1 {v4.4h}, [%3], #8 \n" // r2
                        "ld1 {v5.h}[0], [%3] \n"
                        "fmla v30.4h, %14.4h, v4.h[0] \n"
                        "fmla v31.4h, %14.4h, v4.h[2] \n"
                        "fmla v30.4h, %15.4h, v4.h[1] \n"
                        "fmla v31.4h, %15.4h, v4.h[3] \n"
                        "fmla v30.4h, %16.4h, v4.h[2] \n"
                        "fmla v31.4h, %16.4h, v5.h[0] \n"
                        "st1 {v30.4h, v31.4h}, [%0], #16 \n"
                        : "=r"(outptr0), // %0
                        "=r"(r0),      // %1
                        "=r"(r1),      // %2
                        "=r"(r2)       // %3
                        : "0"(outptr0),
                        "1"(r0),
                        "2"(r1),
                        "3"(r2),
                        "w"(_k00), // %8
                        "w"(_k01), // %9
                        "w"(_k02), // %10
                        "w"(_k10), // %11
                        "w"(_k11), // %12
                        "w"(_k12), // %13
                        "w"(_k20), // %14
                        "w"(_k21), // %15
                        "w"(_k22) // %16
                        : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v30", "v31");
                }
                // remaining single pixels (advance input by 2 halves = stride 2)
                for (; j < outw; j++)
                {
                    asm volatile(
                        "prfm pldl1keep, [%0, #64] \n"
                        "ld1 {v30.4h}, [%0] \n" // sum0
                        "prfm pldl1keep, [%1, #64] \n"
                        "ld1 {v0.4h}, [%1] \n" // r0
                        "fmla v30.4h, %8.4h, v0.h[0] \n"
                        "fmla v30.4h, %9.4h, v0.h[1] \n"
                        "fmla v30.4h, %10.4h, v0.h[2] \n"
                        "prfm pldl1keep, [%2, #64] \n"
                        "ld1 {v1.4h}, [%2] \n" // r1
                        "fmla v30.4h, %11.4h, v1.h[0] \n"
                        "fmla v30.4h, %12.4h, v1.h[1] \n"
                        "fmla v30.4h, %13.4h, v1.h[2] \n"
                        "prfm pldl1keep, [%3, #64] \n"
                        "ld1 {v2.4h}, [%3] \n" // r2
                        "fmla v30.4h, %14.4h, v2.h[0] \n"
                        "fmla v30.4h, %15.4h, v2.h[1] \n"
                        "fmla v30.4h, %16.4h, v2.h[2] \n"
                        "add %1, %1, #4 \n"
                        "add %2, %2, #4 \n"
                        "add %3, %3, #4 \n"
                        "st1 {v30.4h}, [%0], #8 \n"
                        : "=r"(outptr0), // %0
                        "=r"(r0),      // %1
                        "=r"(r1),      // %2
                        "=r"(r2)       // %3
                        : "0"(outptr0),
                        "1"(r0),
                        "2"(r1),
                        "3"(r2),
                        "w"(_k00), // %8
                        "w"(_k01), // %9
                        "w"(_k02), // %10
                        "w"(_k10), // %11
                        "w"(_k11), // %12
                        "w"(_k12), // %13
                        "w"(_k20), // %14
                        "w"(_k21), // %15
                        "w"(_k22) // %16
                        : "cc", "memory", "v0", "v1", "v2", "v30");
                }

                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
            }

            // advance to this output channel's next 3x3 kernel (9 taps x 4 lanes)
            k0 += 9 * 4;
        }
    }
}
|
primo_grande_solution.c | #include <stdio.h>
#include <math.h>
#include <limits.h>
#include <omp.h>
typedef unsigned long long Entero_grande;
#define ENTERO_MAS_GRANDE ULLONG_MAX
int primo(Entero_grande n)
{
int p;
Entero_grande i, s;
p = (n % 2 != 0 || n == 2);
if (p) {
s = sqrt(n);
int numberOfThreads;
#pragma omp parallel
numberOfThreads = omp_get_num_threads();
// do not know when to stop
#pragma omp parallel private(i)
{
//iteration: 3,5,7,9,11,13,15,17,19 ....
//thread: 0,1,2,0,1, ,2, 0,1, 2
int threadId = omp_get_thread_num();
int startIndex = (threadId*2)+3;
int offset = 2* numberOfThreads;
for (i = startIndex; p && i <= s; i += offset)
if (n % i == 0) p = 0;
}
}
return p;
}
/*
 * Searches downward from the largest odd unsigned long long
 * (ULLONG_MAX is odd) for the biggest prime that fits in the type,
 * then reports it together with the elapsed wall-clock time.
 */
int main()
{
    double t1 = omp_get_wtime();    /* BUG FIX: was int, truncating the time */
    Entero_grande n;
    for (n = ENTERO_MAS_GRANDE; !primo(n); n -= 2) {
        /* NADA */
    }
    double t2 = omp_get_wtime();
    /* BUG FIX: sizeof yields size_t, which needs %zu (was %d: UB) */
    printf("El mayor primo que cabe en %zu bytes es %llu.\n",
           sizeof(Entero_grande), n);
    /* BUG FIX: print the elapsed time as a double (was %i on int seconds) */
    printf("took %f seconds\n", t2 - t1);
    return 0;
}
|
stencil_opt5.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include "malloc2D.h"
#include "timer.h"
#define SWAP_PTR(xnew,xold,xtmp) (xtmp=xnew, xnew=xold, xold=xtmp)
/*
 * Jacobi 5-point stencil benchmark with a cache-flushing pass between
 * iterations, timed per phase (init / flush / stencil / total).
 *
 * Fixes over the previous version:
 *  - the four timing accumulators were incremented (+=) while
 *    uninitialized (undefined behavior); they now start at 0.0.
 *  - the flush loop started at index 1 (skipping flush[0]) and stored the
 *    double constant 1.0 into an int array; it now covers the whole buffer
 *    and stores an int.
 */
int main(int argc, char *argv[])
{
#pragma omp parallel
    if (omp_get_thread_num() == 0) printf("Running with %d thread(s)\n",omp_get_num_threads());

    struct timespec tstart_init, tstart_flush, tstart_stencil, tstart_total;
    double init_time = 0.0, flush_time = 0.0, stencil_time = 0.0, total_time = 0.0;

    int imax=2002, jmax = 2002;

    double** xtmp;   /* scratch slot for the pointer swap below */
    double** x = malloc2D(jmax, imax);
    double** xnew = malloc2D(jmax, imax);
    /* buffer ~4x the working set; streaming through it each iteration
       evicts x/xnew from cache so the stencil is timed cold */
    int *flush = (int *)malloc(jmax*imax*sizeof(int)*4);

    cpu_timer_start(&tstart_total);

#pragma omp parallel
    {
        int thread_id = omp_get_thread_num();
        if (thread_id == 0) cpu_timer_start(&tstart_init);

        /* first-touch initialization: each thread touches the rows it will
           later compute, so pages land in its NUMA domain */
#pragma omp for
        for (int j = 0; j < jmax; j++){
            for (int i = 0; i < imax; i++){
                xnew[j][i] = 0.0;
                x[j][i] = 5.0;
            }
        }

        /* hot patch near the center of the domain */
#pragma omp for
        for (int j = jmax/2 - 5; j < jmax/2 + 5; j++){
            for (int i = imax/2 - 5; i < imax/2 -1; i++){
                x[j][i] = 400.0;
            }
        }
        if (thread_id == 0) init_time += cpu_timer_stop(tstart_init);

        for (int iter = 0; iter < 10000; iter++){
            if (thread_id ==0) cpu_timer_start(&tstart_flush);
            /* nowait: flushing may overlap the start of the stencil */
#pragma omp for nowait
            for (int l = 0; l < jmax*imax*4; l++){  /* BUG FIX: started at 1 */
                flush[l] = 1;                       /* BUG FIX: was 1.0 into int */
            }
            if (thread_id == 0){
                flush_time += cpu_timer_stop(tstart_flush);
                cpu_timer_start(&tstart_stencil);
            }

#pragma omp for
            for (int j = 1; j < jmax-1; j++){
                for (int i = 1; i < imax-1; i++){
                    xnew[j][i] = ( x[j][i] + x[j][i-1] + x[j][i+1] + x[j-1][i] + x[j+1][i] )/5.0;
                }
            }
#pragma omp barrier   /* also waits for the nowait flush loop */

            if (thread_id == 0){
                stencil_time += cpu_timer_stop(tstart_stencil);
                SWAP_PTR(xnew, x, xtmp);
                if (iter%1000 == 0) printf("Iter %d\n",iter);
            }
#pragma omp barrier   /* no thread proceeds until the swap is visible */
        }
    } // end omp parallel

    total_time += cpu_timer_stop(tstart_total);

    printf("Timing is init %f flush %f stencil %f total %f\n",init_time,flush_time,stencil_time,total_time);

    free(x);
    free(xnew);
    free(flush);
    return 0;
}
|
GB_unaryop__identity_int8_int32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_int8_int32
// op(A') function:  GB_tran__identity_int8_int32

// C type:   int8_t
// A type:   int32_t
// cast:     int8_t cij = (int8_t) aij
// unaryop:  cij = aij

// These macros parameterize the shared templates below: IDENTITY unary op
// with an int32 -> int8 typecast on each entry.

#define GB_ATYPE \
    int32_t

#define GB_CTYPE \
    int8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    int32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: z is the already-cast input)
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, aij) \
    int8_t z = (int8_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;           \
    GB_OP (GB_CX (pC), z) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Dense apply: Cx [p] = (int8_t) Ax [p] for p in [0, anz).  Safe when Cx and
// Ax alias because each iteration reads and writes only position p.
GrB_Info GB_unop__identity_int8_int32
(
    int8_t *Cx,         // Cx and Ax may be aliased
    int32_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;     // Cx [p] = (int8_t) Ax [p]
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Transposes A into C while casting each entry int32 -> int8; the traversal
// (phase 2 of the two-phase transpose) comes from GB_unaryop_transpose.c.
GrB_Info GB_tran__identity_int8_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
HierarchicalModel.h | /*
Copyright (c) 2017, Fabian Prada
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list of
conditions and the following disclaimer. Redistributions in binary form must reproduce
the above copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the distribution.
Neither the name of the Johns Hopkins University nor the names of its contributors
may be used to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
*/
#ifndef HIERARCHICAL_MODEL_INCLUDED
#define HIERARCHICAL_MODEL_INCLUDED
#include <Eigen/Sparse>
#include "Mesh.h"
#include "GeodesicDescriptors.h"
#include "RayTracer.h"
#include <unordered_set>
class RigidTransformation{
public:
RigidTransformation(){
rotation = Eigen::Matrix3f::Identity();
translation = Eigen::Vector3f(0, 0, 0);
}
RigidTransformation(Eigen::Matrix3f p_rotation, Eigen::Vector3f p_translation){
rotation = p_rotation;
translation = p_translation;
}
Eigen::Matrix3f rotation;
Eigen::Vector3f translation;
};
// A (node index, blend weight) pair used to express weighted parent/son
// relations in the deformation hierarchy.
class IndexWeight{
public:
	IndexWeight(int p_index, float p_weight)
		: index(p_index)
		, weight(p_weight)
	{}
	int index;
	float weight;
};
void TransformModel(const std::vector<Eigen::Vector3f> & restPosition, const std::vector<RigidTransformation> & transformations, const std::vector<std::vector<IndexWeight>> & parents, const std::vector<int> & fineIndices, std::vector<Eigen::Vector3f> & transformedPosition){
std::vector<Eigen::Vector3f> nodePositions(fineIndices.size());
for (int i = 0; i < fineIndices.size(); i++) nodePositions[i] = restPosition[fineIndices[i]];
int vCount = restPosition.size();
transformedPosition.resize(vCount);
int threads = omp_get_num_procs();
#pragma omp parallel for num_threads( threads )
for (int i = 0; i < vCount; i++){
Eigen::Vector3f cumDeformation(0, 0, 0);
for (int j = 0; j < parents[i].size(); j++){
int parentIndex = parents[i][j].index;
float parentWeight = parents[i][j].weight;
cumDeformation += (transformations[parentIndex].rotation*(restPosition[i] - nodePositions[parentIndex]) + transformations[parentIndex].translation + nodePositions[parentIndex])*parentWeight;
}
transformedPosition[i] = cumDeformation;
}
}
// Approximates the geodesic distance between vertices i and j as the
// Chebyshev (L-infinity) distance between their geodesic-descriptor rows.
inline float ApproxGeodesicDistance(const int i, const int j, const Eigen::MatrixXf & geodesicDescriptor){
	const Eigen::RowVectorXf diff = geodesicDescriptor.row(i) - geodesicDescriptor.row(j);
	return diff.lpNorm<Eigen::Infinity>();
}
// An angle paired with the representative index it refers to; used to sort a
// node's neighbours angularly when building its triangle fan.
struct IndexedAngle{
float angle; // angle of the neighbour around the node
int index;   // representative index of that neighbour
};
// Strict weak ordering on IndexedAngle by ascending angle (for std::sort).
bool CompareIndexedAngle(const IndexedAngle & a, const IndexedAngle & b){
return a.angle < b.angle;
}
// Builds one level of the deformation hierarchy.
// A vertex becomes a new representative unless an earlier representative
// already lies within `radius` (approximate geodesic distance, "deep cover").
// Every vertex within 2*radius of a representative becomes its child with a
// linear hat weight; parent and son weights are normalized to partitions of
// unity afterwards. Also derives node-to-node connectivity, a local frame and
// an angular triangle fan per representative. radius == 0 reproduces the
// fine mesh one-to-one.
// Returns 1 on success, 0 if the radius-0 invariant is violated.
//
// Fixes vs. the original: the random tangential seed divided rand() by
// FLT_MAX (making it the constant (-1,-1,-1)); it now divides by RAND_MAX.
// printf format specifiers for size_t / unsigned long long were %d (UB).
int ConstructProlongation(const SimpleMesh & mesh, const std::vector<bool> & fixedVertices, const Eigen::MatrixXf & geodesicDescriptor, const std::vector<std::vector<int>> & neighbours, const float radius, std::vector<int> & representatives,
std::vector<std::vector<IndexWeight>> & nodeParents, std::vector<std::vector<IndexWeight>> & nodeSons,
std::vector<std::vector<int>> & representativeNeighbours, std::vector<std::vector<TriangleIndex>> & representativeTriangles,
std::vector<RigidTransformation> & frames, std::vector<RigidTransformation> & transformations, std::vector<bool> & fixedRepresentative){
	int vCount = neighbours.size();
	std::vector<bool> isDeepCovered(vCount, false);
	unsigned long long numRepresentatives = 0;
	nodeParents.resize(vCount);
	std::vector<float> cumParentWeight(vCount, 0);
	int averageNumParents = 0;
	std::vector<int> reducedIndex(vCount, -1); // fine index -> representative index
	int fixedCount = 0;
	//Find representatives: BFS outward from each not-yet-covered vertex.
	for (int i = 0; i < vCount; i++){
		if (!isDeepCovered[i]){
			std::vector<IndexWeight> currentSons;
			float cumSonsWeight = 0;
			reducedIndex[i] = numRepresentatives;
			representatives.push_back(i);
			bool isFixed = false;
			std::queue<int> visitingQueue;
			visitingQueue.push(i);
			std::unordered_set<int> alreadyAdded;
			alreadyAdded.insert(i);
			while (!visitingQueue.empty()){
				int currentVertex = visitingQueue.front();
				visitingQueue.pop();
				float distance = ApproxGeodesicDistance(i, currentVertex, geodesicDescriptor);
				if (distance <= 2.0*radius){
					// Hat weight: 1 at the representative, 0 at distance 2*radius.
					float weight = radius > 0 ? 1.0 - (distance / (2.0*radius)) : 1.0;
					nodeParents[currentVertex].push_back(IndexWeight(numRepresentatives, weight));
					cumParentWeight[currentVertex] += weight;
					currentSons.push_back(IndexWeight(currentVertex, weight));
					cumSonsWeight += weight;
					if (fixedVertices[currentVertex]){
						isFixed = true;
					}
					averageNumParents++;
					if (distance <= radius){
						isDeepCovered[currentVertex] = true;
					}
					// Expand only through vertices inside the support region.
					const std::vector<int> & vertexNeighbours = neighbours[currentVertex];
					for (int j = 0; j < vertexNeighbours.size(); j++){
						int neighbourVertex = vertexNeighbours[j];
						if (alreadyAdded.find(neighbourVertex) == alreadyAdded.end()){
							alreadyAdded.insert(neighbourVertex);
							visitingQueue.push(neighbourVertex);
						}
					}
				}
			}
			//Normalize sons weight
			for (int k = 0; k < currentSons.size(); k++){
				currentSons[k].weight /= cumSonsWeight;
			}
			if (currentSons.size() < 4 && !(radius == 0) ){
				// %zu: size() is a size_t (the original used %d, which is UB).
				printf("WARNING: Node with very few sons = %zu! \n", currentSons.size());
			}
			nodeSons.push_back(currentSons);
			fixedRepresentative.push_back(isFixed);
			if (isFixed) fixedCount++;
			numRepresentatives++;
		}
	}
	if (radius == 0 && numRepresentatives != vCount){
		printf("ERROR: Num representatives unexpected! \n");
		return 0;
	}
	// %llu: numRepresentatives is unsigned long long (was %d, UB).
	printf("Num representatives %llu of %d \n", numRepresentatives, vCount);
	printf("Num fixed nodes %d of %llu \n", fixedCount, numRepresentatives);
	printf("Averge num parents per node %d \n", averageNumParents / vCount);
	printf("Averge num sons per representative %llu \n", averageNumParents / numRepresentatives);
	//Normalize parents weights
	for (int i = 0; i < vCount; i++){
		for (int j = 0; j < nodeParents[i].size(); j++){
			nodeParents[i][j].weight /= cumParentWeight[i];
		}
	}
	//Construct representative connectivity: two representatives are
	//neighbours if one lies within 2*radius of the other (BFS again).
	int avergeRepresentativeDegree = 0;
	if (radius != 0){
		representativeNeighbours.resize(numRepresentatives);
		for (int i = 0; i < numRepresentatives; i++){
			std::queue<int> visitingQueue;
			int root = representatives[i];
			visitingQueue.push(root);
			std::unordered_set<int> alreadyAdded;
			alreadyAdded.insert(root);
			while (!visitingQueue.empty()){
				int currentVertex = visitingQueue.front();
				visitingQueue.pop();
				float distance = ApproxGeodesicDistance(root, currentVertex, geodesicDescriptor);
				if (distance <= 2.0*radius){
					int j = reducedIndex[currentVertex];
					// j < i avoids inserting the same undirected edge twice.
					if (j != -1 && j != i && j < i){
						representativeNeighbours[i].push_back(j);
						representativeNeighbours[j].push_back(i);
						avergeRepresentativeDegree += 2;
					}
					const std::vector<int> & vertexNeighbours = neighbours[currentVertex];
					for (int k = 0; k < vertexNeighbours.size(); k++){
						int neighbourVertex = vertexNeighbours[k];
						if (alreadyAdded.find(neighbourVertex) == alreadyAdded.end()){
							alreadyAdded.insert(neighbourVertex);
							visitingQueue.push(neighbourVertex);
						}
					}
				}
			}
		}
	}
	else{
		// Full resolution: reuse the fine connectivity unchanged.
		representativeNeighbours = neighbours;
		for (int i = 0; i < representativeNeighbours.size(); i++)avergeRepresentativeDegree += representativeNeighbours[i].size();
	}
	avergeRepresentativeDegree /= numRepresentatives;
	printf("Averge representative degree %d \n", avergeRepresentativeDegree);
	frames.resize(numRepresentatives);
	transformations.resize(numRepresentatives);
	//Construct triangles //WARNING: This may modify the triangles of the finest level
	representativeTriangles.resize(numRepresentatives);
	for (int i = 0; i < numRepresentatives; i++){
		std::vector<int> currentNeighbours = representativeNeighbours[i];
		if (currentNeighbours.size() > 3){
			int currentFineIndex = representatives[i];
			Eigen::Vector3f currentNormal = mesh.normals[currentFineIndex];
			currentNormal /= currentNormal.norm();
			// Random seed direction in [-1,1]^3, projected into the tangent
			// plane. BUGFIX: the original divided by FLT_MAX, which collapses
			// every component to ~-1 and makes the "random" direction constant.
			Eigen::Vector3f tangentialDirection(2.f * ((float(rand()) / RAND_MAX) - 0.5f), 2.f * ((float(rand()) / RAND_MAX) - 0.5f), 2.f * ((float(rand()) / RAND_MAX) - 0.5f));
			tangentialDirection = currentNormal.cross(tangentialDirection);
			tangentialDirection /= tangentialDirection.norm();
			Eigen::Vector3f biTangentialDirection = currentNormal.cross(tangentialDirection);
			biTangentialDirection /= biTangentialDirection.norm();
			// Orthonormal frame: normal, tangent, bitangent.
			Eigen::Matrix3f currentFrame;
			currentFrame.col(0) = currentNormal;
			currentFrame.col(1) = tangentialDirection;
			currentFrame.col(2) = biTangentialDirection;
			Eigen::Vector3f currentPos = mesh.vertices[currentFineIndex];
			frames[i] = RigidTransformation(currentFrame, currentPos);
			// Sort neighbours by their angle in the tangent plane, then fan.
			std::vector<IndexedAngle> angles(currentNeighbours.size());
			for (int n = 0; n < currentNeighbours.size(); n++){
				int j = currentNeighbours[n];
				int neighbourhFineIndex = representatives[j];
				Eigen::Vector3f neighbourPos = mesh.vertices[neighbourhFineIndex];
				Eigen::Vector3f direction = neighbourPos - currentPos;
				float projection[2] = { tangentialDirection.dot(direction), biTangentialDirection.dot(direction) };
				float angle = atan2(projection[0], projection[1]);
				angles[n].angle = angle;
				angles[n].index = j;
			}
			std::sort(angles.begin(), angles.end(),CompareIndexedAngle);
			for (int k = 0; k < angles.size(); k++) representativeTriangles[i].push_back(TriangleIndex(i, angles[k].index, angles[(k + 1) % currentNeighbours.size()].index));
		}
		else{
			if(0) printf("WARNING: Lonely vertex! \n"); // debug-only
		}
	}
	return 1;
}
// A multi-resolution deformation hierarchy built over a SimpleMesh.
// Level 0 is the full-resolution mesh; each coarser level keeps a subset of
// representative nodes with prolongation/restriction weights between levels.
class HierarchicalModel{
public:
// Builds all `levels` levels; returns 1 on success, 0 on failure.
int ConstructHierachy(const SimpleMesh & mesh, const std::vector<bool> & fixedVertices, const int levels, const Eigen::MatrixXf & geodesicDescriptor);
int numLevels;
// Per level: index of each node in the finest mesh.
std::vector<std::vector<int>> hierarchyFineIndices;
// Per level: rest-pose position of each node.
std::vector<std::vector<Eigen::Vector3f>> hierarchyReferencePosition;
// Per level, per fine vertex: weighted parent nodes (prolongation).
std::vector<std::vector<std::vector<IndexWeight>>> hierarchicalParents;
// Per level, per node: weighted child vertices (restriction).
std::vector<std::vector<std::vector<IndexWeight>>> hierarchicalSons;
// Per level: node-to-node adjacency.
std::vector<std::vector<std::vector<int>>> hierarchyNeighbourhoods;
// Per level: triangle fans connecting neighbouring nodes.
std::vector<std::vector<std::vector<TriangleIndex>>> hierarchyTriangles;
// Per level: current rigid transformation of each node.
std::vector<std::vector<RigidTransformation>> hierarchicalTransformation;
// Per level: local reference frame (orientation + origin) of each node.
std::vector<std::vector<RigidTransformation>> hierarchicalFrames;
// Per level: whether each node covers a fixed (constrained) vertex.
std::vector<std::vector<bool>> hierarchicalFixedNodes;
// Per level: accumulated edge-length sum. NOTE(review): the builder sums
// plain norms, not squared norms, despite the name — verify intent.
std::vector<float> hierarchicalSquaredEdgeLenght;
};
// Builds the `levels`-level hierarchy over `mesh`:
//  1. collects vertex adjacency from the triangles and the average edge length,
//  2. per level, selects representatives and prolongation weights via
//     ConstructProlongation with a coverage radius that doubles each level
//     (level 0 uses radius 0, i.e. keeps the full-resolution mesh),
//  3. stores each node's rest position and a per-level edge-length sum.
// Returns 1 on success, 0 if any level fails to build.
int HierarchicalModel::ConstructHierachy(const SimpleMesh & mesh, const std::vector<bool> & fixedVertices, const int levels, const Eigen::MatrixXf & geodesicDescriptor){
numLevels = levels;
int vCount = mesh.vertices.size();
int tCount = mesh.triangles.size();
// Adjacency sets de-duplicate edges shared by several triangles.
std::vector<std::unordered_set<int>> _neighbours(vCount);
double averageEdgeLenght = 0;
for (int t = 0; t < tCount; t++){
for (int k = 0; k < 3; k++){
averageEdgeLenght += (mesh.vertices[mesh.triangles[t][(k + 1) % 3]] - mesh.vertices[mesh.triangles[t][k]]).norm();
_neighbours[mesh.triangles[t][k]].insert(mesh.triangles[t][(k + 1) % 3]);
_neighbours[mesh.triangles[t][k]].insert(mesh.triangles[t][(k + 2) % 3]);
}
}
std::vector<std::vector<int>> neighbours(vCount);
for (int i = 0; i < vCount; i++)neighbours[i] = std::vector<int>(_neighbours[i].begin(), _neighbours[i].end());
// Three edges were summed per triangle (interior edges counted twice).
averageEdgeLenght /= float(3 * mesh.triangles.size());
printf("Average edge length %g\n", averageEdgeLenght);
hierarchyFineIndices.resize(numLevels);
hierarchyNeighbourhoods.resize(numLevels);
hierarchicalParents.resize(numLevels);
hierarchicalSons.resize(numLevels);
hierarchyTriangles.resize(numLevels);
hierarchicalTransformation.resize(numLevels);
hierarchicalFrames.resize(numLevels);
hierarchicalFixedNodes.resize(numLevels);
for (int l = 0; l < numLevels; l++){
printf("Level %d \n", l);
// Coverage radius doubles each level; radius 0 keeps every vertex.
float radius = l > 0 ? averageEdgeLenght * float(1 << l) : 0;
if (!ConstructProlongation(mesh, fixedVertices, geodesicDescriptor, neighbours, radius, hierarchyFineIndices[l], hierarchicalParents[l], hierarchicalSons[l], hierarchyNeighbourhoods[l], hierarchyTriangles[l], hierarchicalFrames[l], hierarchicalTransformation[l], hierarchicalFixedNodes[l])){
printf("Failed construction of level %d \n",l);
return 0;
}
}
// Rest-pose position of every node, per level.
hierarchyReferencePosition.resize(numLevels);
for (int l = 0; l < numLevels; l++){
hierarchyReferencePosition[l].resize(hierarchyFineIndices[l].size());
for (int i = 0; i < hierarchyFineIndices[l].size(); i++){
hierarchyReferencePosition[l][i] = mesh.vertices[hierarchyFineIndices[l][i]];
}
}
hierarchicalSquaredEdgeLenght.resize(numLevels);
printf("Squared edge lengths: \n");
for (int l = 0; l < numLevels; l++){
float cumSquaredEdgeLenght = 0;
const std::vector<std::vector<int>> & neighbourhoods = hierarchyNeighbourhoods[l];
const std::vector<Eigen::Vector3f> & nodeReferencePosition = hierarchyReferencePosition[l];
for (int j = 0; j < neighbourhoods.size(); j++){
Eigen::Vector3f refPosCurrent = nodeReferencePosition[j];
for (int n = 0; n < neighbourhoods[j].size(); n++){
int k = neighbourhoods[j][n];
Eigen::Vector3f refPosNeighbour = nodeReferencePosition[k];
// NOTE(review): accumulates plain .norm() (edge length), not
// squaredNorm(), despite the "Squared" naming — confirm intent.
cumSquaredEdgeLenght += (refPosCurrent - refPosNeighbour).norm();
}
}
hierarchicalSquaredEdgeLenght[l] = cumSquaredEdgeLenght;
printf("Level %d = %g \n", l, cumSquaredEdgeLenght);
}
return 1;
}
#endif //HIERARCHICAL_MODEL_INCLUDED |
fdtd2d.c | /**
* fdtd2d.c: This file was adapted from PolyBench/GPU 1.0 test suite
* to run on GPU with OpenMP 4.0 pragmas and OpenCL driver.
*
* http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*
* Contacts: Marcio M Pereira <mpereira@ic.unicamp.br>
* Rafael Cardoso F Sousa <rafael.cardoso@students.ic.unicamp.br>
* Luís Felipe Mattos <ra107822@students.ic.unicamp.br>
*/
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "BenchmarksUtil.h"
// define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 10.05
/* Problem size. */
#ifdef RUN_TEST
#define SIZE 1100
#elif RUN_BENCHMARK
#define SIZE 9600
#else
#define SIZE 1000
#endif
#define tmax 500
#define NX SIZE
#define NY SIZE
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
/* Initializes the source waveform and the three field arrays.
 *
 * ex is laid out with row stride (NY + 1) and ey with (NX + 1) rows — see
 * runFdtd — so the full extent of each buffer is initialized here. The
 * original code used an NY stride / NX rows, leaving the tails of ex and ey
 * uninitialized even though the kernels read them. */
void init_arrays(DATA_TYPE *_fict_, DATA_TYPE *ex, DATA_TYPE *ey,
                 DATA_TYPE *hz) {
  int i, j;

  for (i = 0; i < tmax; i++) {
    _fict_[i] = (DATA_TYPE)i;
  }
  /* ex: NX rows of NY + 1 entries. */
  for (i = 0; i < NX; i++) {
    for (j = 0; j < NY + 1; j++) {
      ex[i * (NY + 1) + j] = ((DATA_TYPE)i * (j + 1) + 1) / NX;
    }
  }
  /* ey: NX + 1 rows of NY entries. */
  for (i = 0; i < NX + 1; i++) {
    for (j = 0; j < NY; j++) {
      ey[i * NY + j] = ((DATA_TYPE)(i - 1) * (j + 2) + 2) / NX;
    }
  }
  /* hz: NX rows of NY entries. */
  for (i = 0; i < NX; i++) {
    for (j = 0; j < NY; j++) {
      hz[i * NY + j] = ((DATA_TYPE)(i - 9) * (j + 4) + 3) / NX;
    }
  }
}
/* Initializes hz with the same closed-form pattern used by init_arrays, so a
 * separately allocated hz buffer starts from identical state. */
void init_array_hz(DATA_TYPE *hz) {
  int i, j;
  for (i = 0; i < NX; i++) {
    DATA_TYPE *row = hz + (size_t)i * NY;
    for (j = 0; j < NY; j++) {
      row[j] = ((DATA_TYPE)(i - 9) * (j + 4) + 3) / NX;
    }
  }
}
/* Counts the entries of hz1/hz2 whose percent difference exceeds
 * PERCENT_DIFF_ERROR_THRESHOLD, prints the count, and returns it (0 means
 * the CPU and GPU results match within tolerance). */
int compareResults(DATA_TYPE *hz1, DATA_TYPE *hz2) {
  int i, j;
  int mismatches = 0;
  for (i = 0; i < NX; i++) {
    for (j = 0; j < NY; j++) {
      DATA_TYPE a = hz1[i * NY + j];
      DATA_TYPE b = hz2[i * NY + j];
      if (percentDiff(a, b) > PERCENT_DIFF_ERROR_THRESHOLD)
        mismatches++;
    }
  }
  // Print results
  printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f "
         "Percent: %d\n",
         PERCENT_DIFF_ERROR_THRESHOLD, mismatches);
  return mismatches;
}
/* Serial reference implementation of the 2-D FDTD update.
 * Per time step: drive row 0 of ey with the source _fict_[t], update ey and
 * ex from differences of hz, then update hz from the curl of (ex, ey).
 * Layout: ex uses row stride NY + 1; ey and hz use stride NY; ey must hold
 * NX + 1 rows since the hz update reads ey[(i + 1) * NY + j]. */
void runFdtd(DATA_TYPE *_fict_, DATA_TYPE *ex, DATA_TYPE *ey, DATA_TYPE *hz) {
  int t, i, j;
  for (t = 0; t < tmax; t++) {
    /* Source excitation on the first row of ey. */
    for (j = 0; j < NY; j++) {
      ey[0 * NY + j] = _fict_[t];
    }
    /* ey update from the vertical difference of hz. */
    for (i = 1; i < NX; i++) {
      for (j = 0; j < NY; j++) {
        ey[i * NY + j] =
            ey[i * NY + j] - 0.5 * (hz[i * NY + j] - hz[(i - 1) * NY + j]);
      }
    }
    /* ex update (stride NY + 1) from the horizontal difference of hz. */
    for (i = 0; i < NX; i++) {
      for (j = 1; j < NY; j++) {
        ex[i * (NY + 1) + j] = ex[i * (NY + 1) + j] -
                               0.5 * (hz[i * NY + j] - hz[i * NY + (j - 1)]);
      }
    }
    /* hz update from the discrete curl of ex and ey. */
    for (i = 0; i < NX; i++) {
      for (j = 0; j < NY; j++) {
        hz[i * NY + j] =
            hz[i * NY + j] -
            0.7 * (ex[i * (NY + 1) + (j + 1)] - ex[i * (NY + 1) + j] +
                   ey[(i + 1) * NY + j] - ey[i * NY + j]);
      }
    }
  }
}
/* Offloaded variant of runFdtd (OpenMP 4.x target offload). Semantics match
 * the serial kernel; hz is the only array copied back to the host.
 *
 * Fixes relative to the original:
 *  - the three stacked `#pragma omp target` directives are merged into one
 *    target construct: consecutive target directives do not combine their
 *    map clauses, and a target nested directly in a target is invalid;
 *  - hz is mapped with its true allocation size NX * NY (it was mapped as
 *    NX * (NY + 1), overrunning the buffer). */
void runFdtd_OMP(DATA_TYPE *_fict_, DATA_TYPE *ex, DATA_TYPE *ey,
                 DATA_TYPE *hz) {
  int t, i, j;
#pragma omp target device(DEVICE_ID)                                          \
    map(to : _fict_[ : tmax], ex[ : (NX * (NY + 1))], ey[ : ((NX + 1) * NY)]) \
    map(tofrom : hz[ : (NX * NY)])
  {
    /* Time stepping stays sequential; each field update is parallel. */
    for (t = 0; t < tmax; t++) {
#pragma omp parallel for
      for (j = 0; j < NY; j++) {
        ey[0 * NY + j] = _fict_[t];
      }
#pragma omp parallel for collapse(2)
      for (i = 1; i < NX; i++) {
        for (j = 0; j < NY; j++) {
          ey[i * NY + j] =
              ey[i * NY + j] - 0.5 * (hz[i * NY + j] - hz[(i - 1) * NY + j]);
        }
      }
#pragma omp parallel for collapse(2)
      for (i = 0; i < NX; i++) {
        for (j = 1; j < NY; j++) {
          ex[i * (NY + 1) + j] = ex[i * (NY + 1) + j] -
                                 0.5 * (hz[i * NY + j] - hz[i * NY + (j - 1)]);
        }
      }
#pragma omp parallel for collapse(2)
      for (i = 0; i < NX; i++) {
        for (j = 0; j < NY; j++) {
          hz[i * NY + j] =
              hz[i * NY + j] -
              0.7 * (ex[i * (NY + 1) + (j + 1)] - ex[i * (NY + 1) + j] +
                     ey[(i + 1) * NY + j] - ey[i * NY + j]);
        }
      }
    }
  }
}
/* Driver: allocate the fields, run the offloaded kernel, optionally run the
 * serial reference and compare hz. Returns the number of mismatching
 * entries (0 on success). */
int main() {
  double t_start, t_end;
  int fail = 0;

  DATA_TYPE *_fict_;
  DATA_TYPE *ex;
  DATA_TYPE *ey;
  DATA_TYPE *hz;
  DATA_TYPE *hz_outputFromGpu;

  _fict_ = (DATA_TYPE *)malloc(tmax * sizeof(DATA_TYPE));
  ex = (DATA_TYPE *)malloc(NX * (NY + 1) * sizeof(DATA_TYPE));
  ey = (DATA_TYPE *)malloc((NX + 1) * NY * sizeof(DATA_TYPE));
  hz = (DATA_TYPE *)malloc(NX * NY * sizeof(DATA_TYPE));
  hz_outputFromGpu = (DATA_TYPE *)malloc(NX * NY * sizeof(DATA_TYPE));

  fprintf(stdout, "<< 2-D Finite Different Time Domain Kernel >>\n");

  init_arrays(_fict_, ex, ey, hz);
  init_array_hz(hz_outputFromGpu);

  t_start = rtclock();
  runFdtd_OMP(_fict_, ex, ey, hz_outputFromGpu);
  t_end = rtclock();
  fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);

#ifdef RUN_TEST
  /* The offloaded run may advance ex/ey in place (it does whenever the
   * target region executes on the host), so restore the initial field state
   * before the CPU reference run; otherwise the two kernels start from
   * different inputs and the comparison is meaningless. */
  init_arrays(_fict_, ex, ey, hz);
  t_start = rtclock();
  runFdtd(_fict_, ex, ey, hz);
  t_end = rtclock();
  fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);

  fail = compareResults(hz, hz_outputFromGpu);
#endif

  free(_fict_);
  free(ex);
  free(ey);
  free(hz);
  free(hz_outputFromGpu);

  return fail;
}
|
3d25pt_var.c | /*
* Order-4, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Computes *result = *x - *y with a normalized (non-negative) tv_usec.
 * Returns 1 if the difference is negative, otherwise 0.
 * NOTE: *y is adjusted in place as part of the carry handling. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds into y's microseconds when x has fewer usecs. */
  if (x->tv_usec < y->tv_usec) {
    int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * carry;
    y->tv_sec += carry;
  }
  /* Normalize when the microsecond gap itself exceeds one second. */
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* After normalization tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return x->tv_sec < y->tv_sec;
}
/* Driver for the order-4 (25-point) 3-D variable-coefficient stencil.
 * Usage: prog [Nx Ny Nz [Nt]]. Runs the serial kernel TESTS times and
 * reports the per-run and minimum times.
 *
 * Fixes relative to the original:
 *  - Nx/Ny/Nz/Nt get defaults instead of being read uninitialized when too
 *    few arguments are given;
 *  - initialization covers index 0 (the stencil reads a 4-deep halo), and
 *    both time planes of A are defined so halo reads on odd steps are valid;
 *  - uses the MIN macro defined above (the original called `min`, which is
 *    not defined in this file);
 *  - frees the remaining top-level allocations. */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  /* Defaults: 64^3 interior + 8 halo points per axis, 10 time steps. */
  int Nx = 64 + 8, Ny = 64 + 8, Nz = 64 + 8, Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // allocate the arrays: A holds the two time planes, coef the 13
  // axis-symmetric coefficient fields
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*13);
  for(m=0; m<13;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 16;
  tile_size[1] = 16;
  tile_size[2] = 32;
  tile_size[3] = 128;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  /* Initialize every point, index 0 included: the stencil reads indices
   * i-4 .. i+4 (and likewise in j, k), so loops starting at 1 — as in the
   * original — left plane/row/column 0 uninitialized. Both time planes of
   * A are defined so the halo read on odd time steps is valid too. */
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = A[0][i][j][k];
      }
    }
  }
  for (m=0; m<13; m++) {
    for (i=0; i<Nz; i++) {
      for (j=0; j<Ny; j++) {
        for (k=0; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt; t++) {
      for (i = 4; i < Nz-4; i++) {
        for (j = 4; j < Ny-4; j++) {
          for (k = 4; k < Nx-4; k++) {
            A[(t+1)%2][i][j][k] =
              coef[0][i][j][k] * A[(t)%2][i  ][j  ][k  ] +
              coef[1][i][j][k] * (A[(t)%2][i-1][j  ][k  ] + A[(t)%2][i+1][j  ][k  ]) +
              coef[2][i][j][k] * (A[(t)%2][i  ][j-1][k  ] + A[(t)%2][i  ][j+1][k  ]) +
              coef[3][i][j][k] * (A[(t)%2][i  ][j  ][k-1] + A[(t)%2][i  ][j  ][k+1]) +
              coef[4][i][j][k] * (A[(t)%2][i-2][j  ][k  ] + A[(t)%2][i+2][j  ][k  ]) +
              coef[5][i][j][k] * (A[(t)%2][i  ][j-2][k  ] + A[(t)%2][i  ][j+2][k  ]) +
              coef[6][i][j][k] * (A[(t)%2][i  ][j  ][k-2] + A[(t)%2][i  ][j  ][k+2]) +
              coef[7][i][j][k] * (A[(t)%2][i-3][j  ][k  ] + A[(t)%2][i+3][j  ][k  ]) +
              coef[8][i][j][k] * (A[(t)%2][i  ][j-3][k  ] + A[(t)%2][i  ][j+3][k  ]) +
              coef[9][i][j][k] * (A[(t)%2][i  ][j  ][k-3] + A[(t)%2][i  ][j  ][k+3]) +
              coef[10][i][j][k]* (A[(t)%2][i-4][j  ][k  ] + A[(t)%2][i+4][j  ][k  ]) +
              coef[11][i][j][k]* (A[(t)%2][i  ][j-4][k  ] + A[(t)%2][i  ][j+4][k  ]) +
              coef[12][i][j][k]* (A[(t)%2][i  ][j  ][k-4] + A[(t)%2][i  ][j  ][k+4]) ;
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(4, "variable axis-symmetric")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  for(m=0; m<13;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  /* Top-level pointer arrays and the tile list (leaked in the original). */
  free(A);
  free(coef);
  free(tile_size);
  return 0;
}
|
GB_binop__lor_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lor_uint32)
// A.*B function (eWiseMult): GB (_AemultB_01__lor_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__lor_uint32)
// A.*B function (eWiseMult): GB (_AemultB_03__lor_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lor_uint32)
// A*D function (colscale): GB (_AxD__lor_uint32)
// D*A function (rowscale): GB (_DxB__lor_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__lor_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__lor_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lor_uint32)
// C=scalar+B GB (_bind1st__lor_uint32)
// C=scalar+B' GB (_bind1st_tran__lor_uint32)
// C=A+scalar GB (_bind2nd__lor_uint32)
// C=A'+scalar GB (_bind2nd_tran__lor_uint32)
// C type: uint32_t
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = ((aij != 0) || (bij != 0))
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint32_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint32_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ((x != 0) || (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOR || GxB_NO_UINT32 || GxB_NO_LOR_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with the LOR operator; all three matrices are dense, no accumulator.
// Generated code: the body lives in the included template.
GrB_Info GB (_Cdense_ewise3_noaccum__lor_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    // this operator was compiled out via the GxB_NO_* flags (see GB_DISABLE)
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B with the LOR operator: accumulate a sparse matrix into a dense one.
// B_ek_slicing/B_ntasks/B_nthreads describe the parallel slicing of B.
GrB_Info GB (_Cdense_accumB__lor_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b with the LOR operator: accumulate a scalar into a dense matrix.
GrB_Info GB (_Cdense_accumb__lor_uint32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // generated-code quirk: this second return is unreachable
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, with LOR as the
// multiply operator.
GrB_Info GB (_AxD__lor_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C has the pattern of A; only its values are computed here
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, with LOR as the
// multiply operator.
GrB_Info GB (_DxB__lor_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C has the pattern of B; only its values are computed here
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B with the LOR operator; the
// pattern of C is the set union of A and B.
GrB_Info GB (_AaddB__lor_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // scratch slicing workspaces, released by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult (method 01): C = A.*B or C<M> = A.*B with the LOR operator; the
// pattern of C is the set intersection of A and B.
GrB_Info GB (_AemultB_01__lor_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full. For LOR, GB_BINOP_FLIP is 0 (the op is commutative), so the
// template below is compiled once without a flipped variant.
GrB_Info GB (_AemultB_02__lor_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 03): C<M> = A.*B where M is sparse/hyper and both A and
// B are bitmap/full.
GrB_Info GB (_AemultB_03__lor_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where the result C is bitmap.
GrB_Info GB (_AemultB_bitmap__lor_uint32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the LOR operator with the scalar bound as the first
// argument, over all bnz entries of B.
GrB_Info GB (_bind1st__lor_uint32)
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb, // entry bitmap of B; GBB(Bb,p) tests presence
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t x = (*((uint32_t *) x_input)) ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        uint32_t bij = GBX (Bx, p, false) ;
        Cx [p] = ((x != 0) || (bij != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the LOR operator with the scalar bound as the
// second argument, over all anz entries of A.
GrB_Info GB (_bind2nd__lor_uint32)
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab, // entry bitmap of A; GBB(Ab,p) tests presence
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t y = (*((uint32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        uint32_t aij = GBX (Ax, p, false) ;
        Cx [p] = ((aij != 0) || (y != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is the per-entry kernel consumed by the included
// GB_unop_transpose.c template: Cx [pC] = (x || Ax [pA]).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((x != 0) || (aij != 0)) ; \
}
// C = op (x, A'): transpose A while applying (x || aij), uint32_t values.
GrB_Info GB (_bind1st_tran__lor_uint32)
(
GrB_Matrix C,
const GB_void *x_input, // scalar x, bound as first operand
const GrB_Matrix A,
int64_t *restrict *Workspaces, // per-workspace buffers for the transpose template
const int64_t *restrict A_slice, // slicing of A across tasks
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// generated boilerplate: restore GB_ATYPE for subsequent kernels
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is the per-entry kernel consumed by the included
// GB_unop_transpose.c template: Cx [pC] = (Ax [pA] || y).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((aij != 0) || (y != 0)) ; \
}
// C = op (A', y): transpose A while applying (aij || y), uint32_t values.
GrB_Info GB (_bind2nd_tran__lor_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input, // scalar y, bound as second operand
int64_t *restrict *Workspaces, // per-workspace buffers for the transpose template
const int64_t *restrict A_slice, // slicing of A across tasks
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
PoW.c | /* Copyright 2016-2018 The Ulord Core Foundation */
#include "PoW.h"
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <stdlib.h>
#include <assert.h>
// #include <omp.h>
#include "my_time.h"
#include "common.h"
#include "my_rand48_r.h"
#include "oneWayFunction.h"
/*
* Step 1: Initialize working memory.
*/
/*
 * Fill the WORK_MEMORY_SIZE-byte work area Maddr from the message.
 *
 * input/inputLen : message to hash into the initial digest.
 * Maddr          : output work memory, written in 32-byte slots.
 * K              : stride — every K-th slot holds a fresh digest of the
 *                  running state; the K-1 slots after it are PRNG-mixed.
 *
 * Fixes vs. previous revision (behavior unchanged):
 *  - removed unused local `j`;
 *  - removed unused `result[OUTPUT_LEN]` in the scalar branch;
 *  - removed inner `uint8_t shift_num;` declarations that shadowed the
 *    outer `shift_num`.
 */
void initWorkMemory(uint8_t *input, uint32_t inputLen, uint8_t *Maddr, const uint32_t K) {
    uint32_t i, k;
#ifdef _MSC_VER
    __declspec(align(16)) uint8_t a[OUTPUT_LEN], b[OUTPUT_LEN], a_rrs[OUTPUT_LEN], b_rrs[OUTPUT_LEN];
#else
    __attribute__((aligned(16))) uint8_t a[OUTPUT_LEN], b[OUTPUT_LEN], a_rrs[OUTPUT_LEN], b_rrs[OUTPUT_LEN];
#endif
    uint64_t *a_u64 = (uint64_t *)a, *b_u64 = (uint64_t *)b;
    /* a = digest of the input; this is the running 32-byte state. */
    funcInfor[0].func(input, inputLen, a);
    uint64_t randSeed[4] = {0, 0, 0, 0};
    const uint32_t iterNum = WORK_MEMORY_SIZE >> 5;  /* # of 32-byte slots */
    for (i = 0; i < iterNum; i += K) {
        uint8_t t = 0, shift_num = 0;
        /* t: 4-bit selector of which one-way function rehashes the state. */
        reduce_bit(a, 32, (uint8_t *)&t, 8);
        t = (t & 0x0f) ^ (t >> 4);
        shift_num = reduce32_8bits(i);
        /* Rotate the state by a data-dependent amount, then rehash. */
        rrs_32Byte_256(a, a_rrs, shift_num);
        funcInfor[t].func(a_rrs, 32, a);
        /* Derive four 48-bit PRNG seeds from the fresh digest. */
        reduce_bit(a, 8, (uint8_t *)&randSeed[0], 48);
        reduce_bit(a + 8, 8, (uint8_t *)&randSeed[1], 48);
        reduce_bit(a + 16, 8, (uint8_t *)&randSeed[2], 48);
        reduce_bit(a + 24, 8, (uint8_t *)&randSeed[3], 48);
#ifndef SSE_VERSION
        struct my_rand48_data randBuffer[4];
        my_seed48_r(randSeed[0], &randBuffer[0]);
        my_seed48_r(randSeed[1], &randBuffer[1]);
        my_seed48_r(randSeed[2], &randBuffer[2]);
        my_seed48_r(randSeed[3], &randBuffer[3]);
#else
        struct vrand48_data randBuffer[2];
        vseed48(randSeed , &randBuffer[0]);
        vseed48(randSeed + 2, &randBuffer[1]);
#endif
        /* Slot i receives the digest itself. */
        memcpy(Maddr + (i << 5), a, 32*sizeof(uint8_t));
        /* Slots i+1 .. i+K-1: PRNG output, rotated, XOR-folded into the state. */
        for (k = 1; k < K; ++k) {
#ifndef SSE_VERSION
            my_rand64_r(&randBuffer[0], &b_u64[0]);
            my_rand64_r(&randBuffer[1], &b_u64[1]);
            my_rand64_r(&randBuffer[2], &b_u64[2]);
            my_rand64_r(&randBuffer[3], &b_u64[3]);
            shift_num = reduce32_8bits(i + k);
            rrs_32Byte_256(b, b_rrs, shift_num);
            uint64_t *b_rrs_u64 = (uint64_t *)b_rrs;
            a_u64[0] ^= b_rrs_u64[0];
            a_u64[1] ^= b_rrs_u64[1];
            a_u64[2] ^= b_rrs_u64[2];
            a_u64[3] ^= b_rrs_u64[3];
#else
            vrand64(b, randBuffer);
            shift_num = reduce32_8bits(i + k);
            rrs_32Byte_256(b, b_rrs, shift_num);
            /* SIMD equivalent of the 4x 64-bit XOR fold above. */
            __m128i val = _mm_load_si128((__m128i *)a);
            __m128i vah = _mm_load_si128((__m128i *)(a + 16));
            __m128i vresultl = _mm_load_si128((__m128i *)b_rrs);
            __m128i vresulth = _mm_load_si128((__m128i *)(b_rrs + 16));
            vresultl = _mm_xor_si128(val, vresultl);
            vresulth = _mm_xor_si128(vah, vresulth);
            _mm_store_si128((__m128i *)a, vresultl);
            _mm_store_si128((__m128i *)(a + 16), vresulth);
#endif
            memcpy(Maddr + ((i + k) << 5), b_rrs, OUTPUT_LEN*sizeof(uint8_t));
        }
    }
}
/*
* Step 2: Modify the working memory contents.
*/
/*
 * Scramble the work memory with C rounds of L*64 data-dependent byte
 * swaps, accumulating a 32-byte digest into `result`.
 *
 * Maddr  : WORK_MEMORY_SIZE-byte work memory (read and written).
 * L, C   : inner-iteration scale (L << 6 swaps per round) and round count.
 * result : out — XOR-accumulated round digests (OUTPUT_LEN bytes).
 *
 * Fix vs. previous revision (behavior unchanged): removed the unused
 * 64-bit alias `b_u64` of `b`.
 */
void modifyWorkMemory(uint8_t *Maddr, const uint32_t L, const uint32_t C,
                      uint8_t *result) {
    uint32_t i, j;
#ifdef _MSC_VER
    __declspec(align(16)) uint8_t a[OUTPUT_LEN], b[64], a_rrs[OUTPUT_LEN];
#else
    __attribute__((aligned(16))) uint8_t a[OUTPUT_LEN], b[64], a_rrs[OUTPUT_LEN];
#endif
    uint64_t *a_u64 = (uint64_t *)a;
    /* a = digest of the last 32 bytes of the work memory; also seeds result. */
    funcInfor[0].func(Maddr + WORK_MEMORY_SIZE - 32, 32, a);
    memcpy(result, a, OUTPUT_LEN*sizeof(uint8_t));
    uint64_t r = 0;  /* running data-dependent accumulator */
    reduce_bit(a, 32, (uint8_t *)&r, 64);
    const uint32_t iterNum = L << 6;
    for (i = 0; i < C; ++i) {
        /* Reseed the PRNG from the current state each round. */
        uint64_t randSeed = 0;
        reduce_bit(a, 32, (uint8_t *)&randSeed, 48);
        struct my_rand48_data randBuffer;
        my_seed48_r(randSeed, &randBuffer);
        uint8_t t1, t2, s;
        uint64_t randNum = 0, base = 0;
        for (j = 0; j < iterNum; ++j) {
            my_rand48_r(&randBuffer, &randNum);
            base = randNum + r;
            uint64_t offset = 0;
            offset = reduce64_8bits(r);
            offset = (offset << 8) + 1;
            /* Two addresses `offset` apart on either side of base (mod size). */
            uint64_t addr1 = (base + WORK_MEMORY_SIZE - offset) % WORK_MEMORY_SIZE;
            uint64_t addr2 = (base + offset) % WORK_MEMORY_SIZE;
            t1 = Maddr[addr1];
            t2 = Maddr[addr2];
            s = a[j & 0x1f];
            /* Swap the two bytes, masked with a state byte; fold into r. */
            Maddr[addr1] = t2 ^ s;
            Maddr[addr2] = t1 ^ s;
            b[j & 0x3f] = t1 ^ t2;
            r = r + s + t1 + t2;
        }
        /* Condense the 64 collected bytes back into the 32-byte state. */
        uint8_t t = 0;
        t = reduce64_4bits(r);
        reduce_bit(b, 64, a, 256);
        uint8_t shift_num = 0;
        uint64_t ir = r + i;
        shift_num = reduce64_8bits(ir);
        rrs_32Byte_256(a, a_rrs, shift_num);
        funcInfor[t].func(a_rrs, 32, a);
        /* XOR the round digest into the accumulated result. */
        uint64_t *result_u64 = (uint64_t *)result;
        result_u64[0] ^= a_u64[0];
        result_u64[1] ^= a_u64[1];
        result_u64[2] ^= a_u64[2];
        result_u64[3] ^= a_u64[3];
    }
}
/*
* Step 3: Calculate the final result.
*/
/*
 * Walk the whole work memory, XOR-folding 32-byte slots into `result`
 * and periodically rehashing, until the last slot is consumed; the
 * final digest is written to `output`.
 *
 * Maddr  : work memory (read-only here), viewed as 32-byte slots.
 * c      : initial 32-byte value (digest from the previous step).
 * D      : bit-width used to pick how many slots to fold per rehash.
 * output : out — final OUTPUT_LEN-byte result.
 *
 * Fix vs. previous revision (behavior unchanged): removed the unused
 * local `k`.
 */
void calculateFinalResult(uint8_t *Maddr, uint8_t *c, const uint32_t D, uint8_t *output) {
    uint32_t i = 0, j = 0;
#ifdef _MSC_VER
    __declspec(align(16)) uint8_t result[OUTPUT_LEN], result_rrs[OUTPUT_LEN];
#else
    __attribute__((aligned(16))) uint8_t result[OUTPUT_LEN], result_rrs[OUTPUT_LEN];
#endif
    uint64_t *Maddr_u64 = (uint64_t *)Maddr;
    uint64_t *result_u64 = (uint64_t *)result;
    const uint32_t num = (WORK_MEMORY_SIZE >> 5) - 1;  /* last slot index */
    uint32_t it = 0;
    memcpy(result, c, OUTPUT_LEN * sizeof(uint8_t));
    while(1) {
        uint8_t t = 0, shift_num = 0;
        uint32_t d = 0;
        /* t: 4-bit hash-function selector; d: 1..2^D slots to fold this pass. */
        reduce_bit(result, 32, (uint8_t *)&t, 8);
        t = (t & 0x0f) ^ (t >> 4);
        reduce_bit(result, 32, (uint8_t *)&d, D);
        ++d;
        for (j = 0; j < d; ++j) {
            uint32_t index = i << 2;  /* slot i as four uint64_t words */
            result_u64[0] ^= Maddr_u64[index + 0];
            result_u64[1] ^= Maddr_u64[index + 1];
            result_u64[2] ^= Maddr_u64[index + 2];
            result_u64[3] ^= Maddr_u64[index + 3];
            ++i;
            if (i == num) {
                /* Last slot reached: final rotate + hash, emit, done. */
                it = i + t;
                shift_num = reduce32_8bits(it);
                rrs_32Byte_256(result, result_rrs, shift_num);
                funcInfor[0].func(result_rrs, 32, result);
                memcpy(output, result, OUTPUT_LEN * sizeof(uint8_t));
                return;
            }
        }
        /* Rotate by a position-dependent amount and rehash with function t. */
        it = t + i;
        shift_num = reduce32_8bits(it);
        rrs_32Byte_256(result, result_rrs, shift_num);
        funcInfor[t].func(result_rrs, 32, result);
    }
}
/*
* Correctness & Performance test for Proof of work
*/
/*
void testPowFunction(uint8_t *mess, uint32_t messLen, const int64_t iterNum) {
int64_t j;
uint32_t inputLen = messLen;
uint8_t input[INPUT_LEN], output[OUTPUT_LEN];
memset(input, 0, INPUT_LEN*sizeof(uint8_t));
memcpy(input, mess, messLen*sizeof(char));
// Init all one-way function
initOneWayFunction();
uint8_t *Maddr = (uint8_t *)malloc(64 * WORK_MEMORY_SIZE*sizeof(uint8_t));
assert(NULL != Maddr);
memset(Maddr, 0, 64 * WORK_MEMORY_SIZE*sizeof(uint8_t));
printf("****************************** Correctness test (PoW function) ******************************\n");
printf("Test message: %s\n", mess);
powFunction(input, inputLen, Maddr, output);
view_data_u8("PoW", output, OUTPUT_LEN);
printf("*********************************************************************************************\n");
printf("*************************************************** Performance test (PoW function) ***************************************************\n");
uint8_t *result = (uint8_t *)malloc(iterNum * OUTPUT_LEN * sizeof(uint8_t));
assert(NULL != result);
memset(result, 0, iterNum * OUTPUT_LEN * sizeof(uint8_t));
uint32_t threadNumArr[] = {1, 4, 8, 12, 16, 20, 24, 32, 48, 64};
uint32_t threadNumTypes = sizeof(threadNumArr) / sizeof(uint32_t);
printf(" %-18s", "Algorithm");
for (uint32_t ix = 0; ix < threadNumTypes; ++ix)
printf("%12d", threadNumArr[ix]);
printf("\n");
printf("00 %-18s\t", "PoW");
for (uint32_t ix = 0; ix < threadNumTypes; ++ix) {
omp_set_num_threads(threadNumArr[ix]);
double startTime = get_wall_time();
if (threadNumArr[ix] == 1) {
for (j = 0; j < iterNum; ++j) {
powFunction(input, inputLen, Maddr, result + j * OUTPUT_LEN);
}
} else {
#pragma omp parallel for firstprivate(input), private(j) shared(result)
for (j = 0; j < iterNum; ++j) {
powFunction(input, inputLen, Maddr + omp_get_thread_num() * WORK_MEMORY_SIZE, result + j * OUTPUT_LEN);
}
}
double endTime = get_wall_time();
double costTime = endTime - startTime;
printf("%5.0f bps ", iterNum / costTime); fflush(stdout);
// Check result
for (j = 0; j < iterNum; j += 1) {
if (memcmp(output, result + j * OUTPUT_LEN, OUTPUT_LEN)) {
printf("Thread num: %d, j: %ld\n", threadNumArr[ix], j);
view_data_u8("output", output, OUTPUT_LEN);
view_data_u8("result", result + j * OUTPUT_LEN, OUTPUT_LEN);
abort();
}
}
}
printf("\n");
printf("***************************************************************************************************************************************\n");
if (NULL != result) {
free(result);
result = NULL;
}
if (NULL != Maddr) {
free(Maddr);
Maddr = NULL;
}
}
*/
#define OUTPUT_BUFFER_SIZE (32 * 1024UL * 1024UL)
#define MAX_TEST_INPUT_LEN 140
#define MAX_OUT_FILE_NAME_LEN 25
const char testInputCase[][MAX_TEST_INPUT_LEN] = {
"",
"HelloWorld",
"0123456789"
};
/*
 * NIST-style statistical test driver: for each built-in test message,
 * chain iterNum PoW evaluations (each output feeds the next input) and
 * dump the concatenated outputs to "<outFileName>-<ix>.txt".
 *
 * Fixes:
 *  - the feedback memcpy used OUTPUT_LEN * sizeof(uint32_t), copying 4x
 *    the intended bytes and over-reading outputBuffer past its end on
 *    the final iterations (undefined behavior); only the first
 *    OUTPUT_LEN bytes are ever consumed, so sizeof(uint8_t) preserves
 *    all observable output;
 *  - sprintf into the 25-byte name buffer replaced with snprintf to
 *    prevent overflow on long outFileName values.
 */
void powNistTest(const char *outFileName) {
    const uint64_t iterNum = 1024UL * 1024UL;
    /* One 32 MiB buffer holds all chained outputs (iterNum * OUTPUT_LEN). */
    uint8_t *outputBuffer = (uint8_t *)malloc(OUTPUT_BUFFER_SIZE * sizeof(uint8_t));
    assert(NULL != outputBuffer);
    memset(outputBuffer, 0, OUTPUT_BUFFER_SIZE * sizeof(uint8_t));
    uint8_t *Maddr = (uint8_t *)malloc(WORK_MEMORY_SIZE*sizeof(uint8_t));
    assert(NULL != Maddr);
    memset(Maddr, 0, WORK_MEMORY_SIZE*sizeof(uint8_t));
    initOneWayFunction();
    uint32_t testInputCaseNum = sizeof(testInputCase) / sizeof(const char [MAX_TEST_INPUT_LEN]);
    for (uint32_t testCaseIx = 0; testCaseIx < testInputCaseNum; ++testCaseIx) {
        char curOutFileName[MAX_OUT_FILE_NAME_LEN] = "";
        snprintf(curOutFileName, sizeof(curOutFileName), "%s-%u.txt", outFileName, testCaseIx);
        FILE *fp = NULL;
        if (NULL != (fp = fopen(curOutFileName, "wb"))) {
            const uint32_t testInputCaseLen = strlen((char *)testInputCase[testCaseIx]);
            uint8_t input[MAX_TEST_INPUT_LEN];
            memset(input, 0, MAX_TEST_INPUT_LEN*sizeof(uint8_t));
            memcpy(input, testInputCase[testCaseIx], testInputCaseLen*sizeof(uint8_t));
            double startTime = get_wall_time();
            powFunction(input, testInputCaseLen, Maddr, outputBuffer);
            /* Chain: output i-1 (OUTPUT_LEN bytes at offset j) becomes input i. */
            for (uint64_t i = 1, j = 0; i < iterNum; ++i) {
                memcpy(input, outputBuffer + j, OUTPUT_LEN * sizeof(uint8_t));
                j += OUTPUT_LEN;
                powFunction(input, OUTPUT_LEN, Maddr, outputBuffer + j);
            }
            double endTime = get_wall_time();
            double costTime = endTime - startTime;
            fprintf(stdout, "TestCaseIx: %d, Input: %s, IterNum: %lu, Time: %4.2f, Performance: %5.2f bps\n", testCaseIx, \
                    testInputCase[testCaseIx], iterNum, costTime, ((double)(iterNum * OUTPUT_LEN)) / costTime); fflush(stdout);
            fwrite(outputBuffer, sizeof(uint8_t), OUTPUT_BUFFER_SIZE / sizeof(uint8_t), fp);
            fclose(fp);
        } else {
            fprintf(stderr, "Error: Open %s failed!\n", curOutFileName);
            abort();
        }
    }
    if (NULL != outputBuffer) {
        free(outputBuffer);
        outputBuffer = NULL;
    }
    if (NULL != Maddr) {
        free(Maddr);
        Maddr = NULL;
    }
}
|
mandel-omp.c | /*
* Sequential Mandelbrot program
*
* This program computes and displays all or part of the Mandelbrot
* set. By default, it examines all points in the complex plane
* that have both real and imaginary parts between -2 and 2.
* Command-line parameters allow zooming in on a specific part of
* this range.
*
* Usage:
* mandel [-i maxiter -c x0 y0 -s size -w windowsize]
* where
* maxiter denotes the maximum number of iterations at each point -- by default 1000
* x0, y0, and size specify the range to examine (a square
* centered at (x0 + iy0) of size 2*size by 2*size -- by default,
* a square of size 4 by 4 centered at the origin)
* windowsize denotes the size of the image (diplay window) to compute
*
* Input: none, except the optional command-line arguments
* Output: a graphical display as described in Wilkinson & Allen,
* displayed using the X Window system, plus text output to
* standard output showing the above parameters, plus execution
* time in seconds.
*
* Code based on the original code from Web site for Wilkinson and Allen's
* text on parallel programming:
* http://www.cs.uncc.edu/~abw/parallel/par_prog/
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <malloc.h>
#if _DISPLAY_
#include <X11/Xlib.h>
#include <X11/Xutil.h>
#include <X11/Xos.h>
#endif
#include <sys/time.h>
/* Return wall-clock time as microseconds since the Unix epoch. */
double getusec_() {
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double)now.tv_sec * 1e6 + (double)now.tv_usec;
}
#define START_COUNT_TIME stamp = getusec_();
#define STOP_COUNT_TIME(_m) stamp = getusec_() - stamp;\
stamp = stamp/1e6;\
printf ("%s: %0.6fs\n",(_m), stamp);
/* Default values for things. */
#define N 2 /* size of problem space (x, y from -N to N) */
#define NPIXELS 800 /* size of display window in pixels */
int row, col; // variables used to traverse the problem space
/* Structure definition for complex numbers */
typedef struct {
double real, imag;
} complex;
#if _DISPLAY_
/* Functions for GUI */
#include "mandelbrot-gui.h" /* has setup(), interact() */
#endif
/*
 * Compute the escape-iteration count for every pixel of a height x width
 * image covering the complex square whose lower-left corner is
 * (real_min, imag_min), with per-pixel steps scale_real / scale_imag.
 * In display builds the point is drawn; otherwise output[row][col] = k.
 */
void mandelbrot(int height,
                int width,
                double real_min,
                double imag_min,
                double scale_real,
                double scale_imag,
                int maxiter,
#if _DISPLAY_
                int setup_return,
                Display *display,
                Window win,
                GC gc,
                double scale_color,
                double min_color)
#else
                int ** output)
#endif
{
    /* Fix: the loop indices were the file-scope globals `row`/`col`.
     * Globals are shared across threads, so any OpenMP parallelization
     * of this loop (the intent of this file) would race on them; they
     * are not needed outside this function, so make them locals. */
    for (int row = 0; row < height; ++row) {
        for (int col = 0; col < width; ++col) {
            complex z, c;
            z.real = z.imag = 0;
            /* Scale display coordinates to actual region; height-1-row
             * so the y axis displays with larger values at the top. */
            c.real = real_min + ((double) col * scale_real);
            c.imag = imag_min + ((double) (height-1-row) * scale_imag);
            /* Iterate z <- z^2 + c until divergence (|z|^2 >= N*N)
             * or the iteration budget is exhausted. */
            int k = 0;
            double lengthsq, temp;
            do {
                temp = z.real*z.real - z.imag*z.imag + c.real;
                z.imag = 2*z.real*z.imag + c.imag;
                z.real = temp;
                lengthsq = z.real*z.real + z.imag*z.imag;
                ++k;
            } while (lengthsq < (N*N) && k < maxiter);
#if _DISPLAY_
            /* Scale color and display point; X11 calls are serialized. */
            long color = (long) ((k-1) * scale_color) + min_color;
            if (setup_return == EXIT_SUCCESS) {
                #pragma omp critical
                {
                    XSetForeground (display, gc, color);
                    XDrawPoint (display, win, gc, col, row);
                }
            }
#else
            output[row][col]=k;
#endif
        }
    }
}
/* Parse options, compute (and optionally display or dump) the Mandelbrot
 * image, and report the execution time. */
int main(int argc, char *argv[]) {
int maxiter = 1000;
double real_min;
double real_max;
double imag_min;
double imag_max;
int width = NPIXELS; /* dimensions of display window */
int height = NPIXELS;
double size=N, x0 = 0, y0 = 0;
#if _DISPLAY_
Display *display;
Window win;
GC gc;
int setup_return;
long min_color = 0, max_color = 0;
double scale_color;
#else
int ** output;
FILE *fp = NULL;
#endif
double scale_real, scale_imag;
/* Process command-line arguments */
/* NOTE(review): each flag is assumed to be followed by its value;
 * a trailing flag with no value makes atoi/atof read argv[argc]
 * (NULL) — confirm callers never do this. */
for (int i=1; i<argc; i++) {
if (strcmp(argv[i], "-i")==0) {
maxiter = atoi(argv[++i]);
}
else if (strcmp(argv[i], "-w")==0) {
width = atoi(argv[++i]);
height = width;
}
else if (strcmp(argv[i], "-s")==0) {
size = atof(argv[++i]);
}
#if !_DISPLAY_
else if (strcmp(argv[i], "-o")==0) {
if((fp=fopen("mandel.out", "wb"))==NULL) {
fprintf(stderr, "Unable to open file\n");
return EXIT_FAILURE;
}
}
#endif
else if (strcmp(argv[i], "-c")==0) {
x0 = atof(argv[++i]);
y0 = atof(argv[++i]);
}
else {
/* Unknown option: print usage and exit. */
#if _DISPLAY_
fprintf(stderr, "Usage: %s [-i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]);
#else
fprintf(stderr, "Usage: %s [-o -i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]);
fprintf(stderr, "       -o to write computed image to disk (default no file generated)\n");
#endif
fprintf(stderr, "       -i to specify maximum number of iterations at each point (default 1000)\n");
#if _DISPLAY_
fprintf(stderr, "       -w to specify the size of the display window (default 800x800 pixels)\n");
#else
fprintf(stderr, "       -w to specify the size of the image to compute (default 800x800 elements)\n");
#endif
fprintf(stderr, "       -c to specify the center x0+iy0 of the square to compute (default origin)\n");
fprintf(stderr, "       -s to specify the size of the square to compute (default 2, i.e. size 4 by 4)\n");
return EXIT_FAILURE;
}
}
/* The computed square is centered at (x0, y0) with half-side `size`. */
real_min = x0 - size;
real_max = x0 + size;
imag_min = y0 - size;
imag_max = y0 + size;
/* Produce text output */
fprintf(stdout, "\n");
fprintf(stdout, "Mandelbrot program\n");
fprintf(stdout, "center = (%g, %g), size = %g\n",
(real_max + real_min)/2, (imag_max + imag_min)/2,
(real_max - real_min)/2);
fprintf(stdout, "maximum iterations = %d\n", maxiter);
fprintf(stdout, "\n");
#if _DISPLAY_
/* Initialize for graphical display */
setup_return =
setup(width, height, &display, &win, &gc, &min_color, &max_color);
if (setup_return != EXIT_SUCCESS) {
fprintf(stderr, "Unable to initialize display, continuing\n");
return EXIT_FAILURE;
}
#else
/* Row-pointer image buffer; freed by process exit. */
output = malloc(height*sizeof(int *));
for (int row = 0; row < height; ++row)
output[row] = malloc(width*sizeof(int));
#endif
/* Compute factors to scale computational region to window */
scale_real = (double) (real_max - real_min) / (double) width;
scale_imag = (double) (imag_max - imag_min) / (double) height;
#if _DISPLAY_
/* Compute factor for color scaling */
scale_color = (double) (max_color - min_color) / (double) (maxiter - 1);
#endif
/* Start timing */
double stamp;
START_COUNT_TIME;
#if _DISPLAY_
mandelbrot(height,width,real_min, imag_min, scale_real, scale_imag, maxiter,
setup_return, display, win, gc, scale_color, min_color);
#else
mandelbrot(height,width,real_min, imag_min, scale_real, scale_imag, maxiter,
output);
#endif
/* End timing */
STOP_COUNT_TIME("Total execution time");
/* Be sure all output is written */
#if _DISPLAY_
if (setup_return == EXIT_SUCCESS) {
XFlush (display);
}
#else
/* Dump the raw int image row by row when -o was given. */
if (fp != NULL)
{
for (int row = 0; row < height; ++row)
if(fwrite(output[row], sizeof(int), width, fp) != width) {
fprintf(stderr, "Output file not written correctly\n");
}
}
#endif
#if _DISPLAY_
/* Wait for user response, then exit program */
if (setup_return == EXIT_SUCCESS) {
interact(display, &win, width, height,
real_min, real_max, imag_min, imag_max);
}
return EXIT_SUCCESS;
#endif
}
|
correlation.c | /**
* correlation.c This file was adapted from PolyBench/GPU 1.0 test suite
* to run on GPU with OpenMP 4.0 pragmas and OpenCL driver.
*
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*
* Contacts: Marcio M Pereira <mpereira@ic.unicamp.br>
* Rafael Cardoso F Sousa <rafael.cardoso@students.ic.unicamp.br>
* Luís Felipe Mattos <ra107822@students.ic.unicamp.br>
*/
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "BenchmarksUtil.h"
// define the error threshold for the results "not matching"
#define ERROR_THRESHOLD 1.05
/* Problem size. */
#ifdef RUN_TEST
#define SIZE 1100
#elif RUN_BENCHMARK
#define SIZE 9600
#else
#define SIZE 1000
#endif
/* Problem size */
#define M SIZE
#define N SIZE
#define sqrt_of_array_cell(x, j) sqrt(x[j])
#define FLOAT_N 3214212.01f
#define EPS 0.005f
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
/* Fill the (M+1) x (N+1) input matrix: data[r][c] = (r * c) / (M + 1). */
void init_arrays(DATA_TYPE *data) {
  for (int r = 0; r <= M; r++) {
    DATA_TYPE *rowp = data + r * (N + 1);
    for (int c = 0; c <= N; c++) {
      rowp[c] = ((DATA_TYPE)r * c) / (M + 1);
    }
  }
}
/* Reference (CPU) correlation kernel: computes column means and standard
 * deviations, standardizes `data` in place, then fills the symmetric
 * correlation matrix `symmat`.  All loops start at index 1 (row/column 0
 * of the padded (M+1) x (N+1) arrays is not touched here). */
void correlation(DATA_TYPE *data, DATA_TYPE *mean, DATA_TYPE *stddev,
DATA_TYPE *symmat) {
int i, j, j1, j2;
// Determine mean of column vectors of input data matrix
// (PolyBench quirk: divides by the fixed constant FLOAT_N, not by N)
for (j = 1; j < (M + 1); j++) {
mean[j] = 0.0;
for (i = 1; i < (N + 1); i++) {
mean[j] += data[i * (M + 1) + j];
}
mean[j] /= (DATA_TYPE)FLOAT_N;
}
// Determine standard deviations of column vectors of data matrix.
for (j = 1; j < (M + 1); j++) {
stddev[j] = 0.0;
for (i = 1; i < (N + 1); i++) {
stddev[j] +=
(data[i * (M + 1) + j] - mean[j]) * (data[i * (M + 1) + j] - mean[j]);
}
stddev[j] /= FLOAT_N;
stddev[j] = sqrt_of_array_cell(stddev, j);
// Guard near-zero deviations so the later division cannot blow up.
stddev[j] = stddev[j] <= EPS ? 1.0 : stddev[j];
}
// i - threadIdx.x, j = threadIdx.y
// Center and reduce the column vectors.
for (i = 1; i < (N + 1); i++) {
for (j = 1; j < (M + 1); j++) {
data[i * (M + 1) + j] -= mean[j];
data[i * (M + 1) + j] /= (sqrt(FLOAT_N) * stddev[j]);
}
}
// Calculate the m * m correlation matrix.
// Only the upper triangle is computed; each value is mirrored below.
for (j1 = 1; j1 < M; j1++) {
symmat[j1 * (M + 1) + j1] = 1.0;
for (j2 = j1 + 1; j2 < (M + 1); j2++) {
symmat[j1 * (M + 1) + j2] = 0.0;
for (i = 1; i < (N + 1); i++) {
symmat[j1 * (M + 1) + j2] +=
(data[i * (M + 1) + j1] * data[i * (M + 1) + j2]);
}
symmat[j2 * (M + 1) + j1] = symmat[j1 * (M + 1) + j2];
}
}
symmat[M * (M + 1) + M] = 1.0;
}
/* Offloaded variant of the correlation kernel.  The mean loop and the
 * correlation-matrix loop run on the target device (DEVICE_ID); the
 * stddev and standardization loops run on the host in between, which is
 * why `data` is mapped tofrom in the first region and `to` again in the
 * second.  Must produce results matching correlation() above. */
void correlation_OMP(DATA_TYPE *data, DATA_TYPE *mean, DATA_TYPE *stddev,
DATA_TYPE *symmat) {
int i, j, k;
// Determine mean of column vectors of input data matrix
// (same FLOAT_N divisor as the reference kernel)
#pragma omp target map(tofrom : data[ : (M + 1) * (N + 1)], \
mean[ : (M + 1)]) device(DEVICE_ID)
#pragma omp parallel for // schedule(auto)
for (j = 1; j < (M + 1); j++) {
mean[j] = 0.0;
for (i = 1; i < (N + 1); i++) {
mean[j] += data[i * (M + 1) + j];
}
mean[j] /= (DATA_TYPE)FLOAT_N;
}
// Determine standard deviations of column vectors of data matrix.
// (host loop)
for (j = 1; j < (M + 1); j++) {
stddev[j] = 0.0;
for (i = 1; i < (N + 1); i++) {
stddev[j] +=
(data[i * (M + 1) + j] - mean[j]) * (data[i * (M + 1) + j] - mean[j]);
}
stddev[j] /= FLOAT_N;
stddev[j] = sqrt(stddev[j]);
// Guard near-zero deviations so the later division cannot blow up.
if (stddev[j] <= EPS) {
stddev[j] = 1.0;
}
}
// Center and reduce the column vectors. (host loop)
for (i = 1; i < (N + 1); i++) {
for (j = 1; j < (M + 1); j++) {
data[i * (M + 1) + j] -= mean[j];
data[i * (M + 1) + j] /= (sqrt(FLOAT_N) * stddev[j]);
}
}
// Calculate the m * m correlation matrix. (device; one row of the
// upper triangle per iteration of k, mirrored into the lower triangle)
#pragma omp target map(to : data[ : (M + 1) * (N + 1)]) map( \
tofrom : symmat[ : (M + 1) * (N + 1)]) device(DEVICE_ID)
#pragma omp parallel for // collapse(1) schedule(static, 32)
for (k = 1; k < M; k++) {
symmat[k * (M + 1) + k] = 1.0;
for (j = k + 1; j < (M + 1); j++) {
symmat[k * (M + 1) + j] = 0.0;
for (i = 1; i < (N + 1); i++) {
symmat[k * (M + 1) + j] +=
(data[i * (M + 1) + k] * data[i * (M + 1) + j]);
}
symmat[j * (M + 1) + k] = symmat[k * (M + 1) + j];
}
}
symmat[M * (M + 1) + M] = 1.0;
}
/* Count entries of the CPU and GPU correlation matrices whose percent
 * difference exceeds ERROR_THRESHOLD, print the count, and return it.
 * Indices start at 1, matching the compute kernels. */
int compareResults(DATA_TYPE *symmat, DATA_TYPE *symmat_outputFromGpu) {
  int mismatches = 0;
  for (int row = 1; row <= M; row++) {
    for (int col = 1; col <= N; col++) {
      const int idx = row * (N + 1) + col;
      if (percentDiff(symmat[idx], symmat_outputFromGpu[idx]) >
          ERROR_THRESHOLD) {
        mismatches++;
        // printf("i: %d j: %d\n1: %f 2: %f\n", i, j, symmat[i*N + j],
        // symmat_GPU[i*N + j]);
      }
    }
  }
  // print results
  printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f "
         "Percent: %d\n",
         ERROR_THRESHOLD, mismatches);
  return mismatches;
}
/* Driver: run the offloaded kernel, optionally validate against the CPU
 * reference, and return the number of mismatching entries (0 = success).
 *
 * Fix: the return value of compareResults() was discarded, so `fail`
 * stayed 0 and the process always exited successfully even when the
 * GPU/CPU results disagreed.  It is now captured and returned. */
int main() {
  double t_start, t_end;
  int fail = 0;
  DATA_TYPE *data;
  DATA_TYPE *mean;
  DATA_TYPE *stddev;
  DATA_TYPE *symmat;
  DATA_TYPE *symmat_GPU;
  data = (DATA_TYPE *)malloc((M + 1) * (N + 1) * sizeof(DATA_TYPE));
  mean = (DATA_TYPE *)malloc((M + 1) * sizeof(DATA_TYPE));
  stddev = (DATA_TYPE *)malloc((M + 1) * sizeof(DATA_TYPE));
  symmat = (DATA_TYPE *)malloc((M + 1) * (N + 1) * sizeof(DATA_TYPE));
  symmat_GPU = (DATA_TYPE *)malloc((M + 1) * (N + 1) * sizeof(DATA_TYPE));
  fprintf(stdout, "<< Correlation Computation >>\n");
  init_arrays(data);
  t_start = rtclock();
  correlation_OMP(data, mean, stddev, symmat_GPU);
  t_end = rtclock();
  fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
#ifdef RUN_TEST
  /* Re-initialize: correlation_OMP standardized `data` in place. */
  init_arrays(data);
  t_start = rtclock();
  correlation(data, mean, stddev, symmat);
  t_end = rtclock();
  fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
  fail = compareResults(symmat, symmat_GPU);
#endif
  free(data);
  free(mean);
  free(stddev);
  free(symmat);
  free(symmat_GPU);
  return fail;
}
|
Parser.h | //===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Parser interface.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_PARSE_PARSER_H
#define LLVM_CLANG_PARSE_PARSER_H
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Availability.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/OperatorPrecedence.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Lex/CodeCompletionHandler.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/Sema.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/SaveAndRestore.h"
#include <memory>
#include <stack>
namespace clang {
class PragmaHandler;
class Scope;
class BalancedDelimiterTracker;
class CorrectionCandidateCallback;
class DeclGroupRef;
class DiagnosticBuilder;
struct LoopHint;
class Parser;
class ParsingDeclRAIIObject;
class ParsingDeclSpec;
class ParsingDeclarator;
class ParsingFieldDeclarator;
class ColonProtectionRAIIObject;
class InMessageExpressionRAIIObject;
class PoisonSEHIdentifiersRAIIObject;
class OMPClause;
class ObjCTypeParamList;
class ObjCTypeParameter;
/// Parser - This implements a parser for the C family of languages. After
/// parsing units of the grammar, productions are invoked to handle whatever has
/// been read.
///
class Parser : public CodeCompletionHandler {
friend class ColonProtectionRAIIObject;
friend class ParsingOpenMPDirectiveRAII;
friend class InMessageExpressionRAIIObject;
friend class PoisonSEHIdentifiersRAIIObject;
friend class ObjCDeclContextSwitch;
friend class ParenBraceBracketBalancer;
friend class BalancedDelimiterTracker;
Preprocessor &PP;
/// Tok - The current token we are peeking ahead. All parsing methods assume
/// that this is valid.
Token Tok;
// PrevTokLocation - The location of the token we previously
// consumed. This token is used for diagnostics where we expected to
// see a token following another token (e.g., the ';' at the end of
// a statement).
SourceLocation PrevTokLocation;
/// Tracks an expected type for the current token when parsing an expression.
/// Used by code completion for ranking.
PreferredTypeBuilder PreferredType;
unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0;
unsigned short MisplacedModuleBeginCount = 0;
/// Actions - These are the callbacks we invoke as we parse various constructs
/// in the file.
Sema &Actions;
DiagnosticsEngine &Diags;
/// ScopeCache - Cache scopes to reduce malloc traffic.
enum { ScopeCacheSize = 16 };
unsigned NumCachedScopes;
Scope *ScopeCache[ScopeCacheSize];
/// Identifiers used for SEH handling in Borland. These are only
/// allowed in particular circumstances
// __except block
IdentifierInfo *Ident__exception_code,
*Ident___exception_code,
*Ident_GetExceptionCode;
// __except filter expression
IdentifierInfo *Ident__exception_info,
*Ident___exception_info,
*Ident_GetExceptionInfo;
// __finally
IdentifierInfo *Ident__abnormal_termination,
*Ident___abnormal_termination,
*Ident_AbnormalTermination;
/// Contextual keywords for Microsoft extensions.
IdentifierInfo *Ident__except;
mutable IdentifierInfo *Ident_sealed;
/// Ident_super - IdentifierInfo for "super", to support fast
/// comparison.
IdentifierInfo *Ident_super;
/// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and
/// "bool" fast comparison. Only present if AltiVec or ZVector are enabled.
IdentifierInfo *Ident_vector;
IdentifierInfo *Ident_bool;
/// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison.
/// Only present if AltiVec enabled.
IdentifierInfo *Ident_pixel;
/// Objective-C contextual keywords.
IdentifierInfo *Ident_instancetype;
/// Identifier for "introduced".
IdentifierInfo *Ident_introduced;
/// Identifier for "deprecated".
IdentifierInfo *Ident_deprecated;
/// Identifier for "obsoleted".
IdentifierInfo *Ident_obsoleted;
/// Identifier for "unavailable".
IdentifierInfo *Ident_unavailable;
/// Identifier for "message".
IdentifierInfo *Ident_message;
/// Identifier for "strict".
IdentifierInfo *Ident_strict;
/// Identifier for "replacement".
IdentifierInfo *Ident_replacement;
/// Identifiers used by the 'external_source_symbol' attribute.
IdentifierInfo *Ident_language, *Ident_defined_in,
*Ident_generated_declaration;
/// C++11 contextual keywords.
mutable IdentifierInfo *Ident_final;
mutable IdentifierInfo *Ident_GNU_final;
mutable IdentifierInfo *Ident_override;
// C++2a contextual keywords.
mutable IdentifierInfo *Ident_import;
mutable IdentifierInfo *Ident_module;
// C++ type trait keywords that can be reverted to identifiers and still be
// used as type traits.
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits;

// Handlers for the annotation tokens produced for the various '#pragma'
// directives. They are set up by initializePragmaHandlers() and torn down
// by resetPragmaHandlers() (declared further below).
std::unique_ptr<PragmaHandler> AlignHandler;
std::unique_ptr<PragmaHandler> GCCVisibilityHandler;
std::unique_ptr<PragmaHandler> OptionsHandler;
std::unique_ptr<PragmaHandler> PackHandler;
std::unique_ptr<PragmaHandler> MSStructHandler;
std::unique_ptr<PragmaHandler> UnusedHandler;
std::unique_ptr<PragmaHandler> WeakHandler;
std::unique_ptr<PragmaHandler> RedefineExtnameHandler;
std::unique_ptr<PragmaHandler> FPContractHandler;
std::unique_ptr<PragmaHandler> OpenCLExtensionHandler;
std::unique_ptr<PragmaHandler> OpenMPHandler;
std::unique_ptr<PragmaHandler> PCSectionHandler;

// Microsoft-specific pragma handlers (#pragma comment, section, vtordisp,
// the segment pragmas, etc.).
std::unique_ptr<PragmaHandler> MSCommentHandler;
std::unique_ptr<PragmaHandler> MSDetectMismatchHandler;
std::unique_ptr<PragmaHandler> MSPointersToMembers;
std::unique_ptr<PragmaHandler> MSVtorDisp;
std::unique_ptr<PragmaHandler> MSInitSeg;
std::unique_ptr<PragmaHandler> MSDataSeg;
std::unique_ptr<PragmaHandler> MSBSSSeg;
std::unique_ptr<PragmaHandler> MSConstSeg;
std::unique_ptr<PragmaHandler> MSCodeSeg;
std::unique_ptr<PragmaHandler> MSSection;
std::unique_ptr<PragmaHandler> MSRuntimeChecks;
std::unique_ptr<PragmaHandler> MSIntrinsic;
std::unique_ptr<PragmaHandler> MSOptimize;

// CUDA, loop/unroll hint, clang fp, and STDC pragma handlers.
std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler;
std::unique_ptr<PragmaHandler> OptimizeHandler;
std::unique_ptr<PragmaHandler> LoopHintHandler;
std::unique_ptr<PragmaHandler> UnrollHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollHintHandler;
std::unique_ptr<PragmaHandler> UnrollAndJamHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollAndJamHintHandler;
std::unique_ptr<PragmaHandler> FPHandler;
std::unique_ptr<PragmaHandler> STDCFENVHandler;
std::unique_ptr<PragmaHandler> STDCCXLIMITHandler;
std::unique_ptr<PragmaHandler> STDCUnknownHandler;
std::unique_ptr<PragmaHandler> AttributePragmaHandler;

// Handler for plain source comments (not a pragma); per its name it routes
// them to Sema.
std::unique_ptr<CommentHandler> CommentSemaHandler;

// --- Parser state flags ------------------------------------------------

/// Whether the '>' token acts as an operator or not. This will be
/// true except when we are parsing an expression within a C++
/// template argument list, where the '>' closes the template
/// argument list.
bool GreaterThanIsOperator;

/// ColonIsSacred - When this is false, we aggressively try to recover from
/// code like "foo : bar" as if it were a typo for "foo :: bar". This is not
/// safe in case statements and a few other things. This is managed by the
/// ColonProtectionRAIIObject RAII object.
bool ColonIsSacred;

/// Parsing OpenMP directive mode.
bool OpenMPDirectiveParsing = false;

/// When true, we are directly inside an Objective-C message
/// send expression.
///
/// This is managed by the \c InMessageExpressionRAIIObject class, and
/// should not be set directly.
bool InMessageExpression;

/// Gets set to true after calling ProduceSignatureHelp, it is for a
/// workaround to make sure ProduceSignatureHelp is only called at the deepest
/// function call.
bool CalledSignatureHelp = false;

/// The "depth" of the template parameters currently being parsed.
unsigned TemplateParameterDepth;
/// RAII helper that records how many template-parameter levels it has
/// contributed to the parser's depth counter and removes exactly that many
/// again when it goes out of scope.
class TemplateParameterDepthRAII {
  unsigned &TrackedDepth; // The counter being managed (lives in the Parser).
  unsigned LevelsAdded;   // How many levels this object has added so far.

public:
  explicit TemplateParameterDepthRAII(unsigned &Depth)
      : TrackedDepth(Depth), LevelsAdded(0) {}

  /// Undo every level this object added.
  ~TemplateParameterDepthRAII() { TrackedDepth -= LevelsAdded; }

  /// Enter one additional template-parameter level.
  void operator++() {
    TrackedDepth += 1;
    LevelsAdded += 1;
  }

  /// Enter \p D additional levels at once.
  void addDepth(unsigned D) {
    TrackedDepth += D;
    LevelsAdded += D;
  }

  /// Replace this object's entire contribution with exactly \p D levels.
  void setAddedDepth(unsigned D) {
    TrackedDepth = TrackedDepth - LevelsAdded + D;
    LevelsAdded = D;
  }

  /// Current depth, including this object's contribution.
  unsigned getDepth() const { return TrackedDepth; }
  /// Depth as it was before this object added anything.
  unsigned getOriginalDepth() const { return TrackedDepth - LevelsAdded; }
};
/// Factory object for creating ParsedAttr objects.
AttributeFactory AttrFactory;

/// Gathers and cleans up TemplateIdAnnotations when parsing of a
/// top-level declaration is finished.
SmallVector<TemplateIdAnnotation *, 16> TemplateIds;

/// Identifiers which have been declared within a tentative parse.
SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers;

/// Tracker for '<' tokens that might have been intended to be treated as an
/// angle bracket instead of a less-than comparison.
///
/// This happens when the user intends to form a template-id, but typoes the
/// template-name or forgets a 'template' keyword for a dependent template
/// name.
///
/// We track these locations from the point where we see a '<' with a
/// name-like expression on its left until we see a '>' or '>>' that might
/// match it.
struct AngleBracketTracker {
  /// Flags used to rank candidate template names when there is more than one
  /// '<' in a scope.
  ///
  /// Encoded as a two-bit mask: bit 0 records the absence of a space before
  /// the '<', bit 1 records that the name was dependent. add() below keeps
  /// the candidate with the numerically larger (or equal) priority.
  enum Priority : unsigned short {
    /// A non-dependent name that is a potential typo for a template name.
    PotentialTypo = 0x0,
    /// A dependent name that might instantiate to a template-name.
    DependentName = 0x2,
    /// A space appears before the '<' token.
    SpaceBeforeLess = 0x0,
    /// No space before the '<' token
    NoSpaceBeforeLess = 0x1,
    LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue*/ DependentName)
  };
  /// One pending candidate '<', remembered together with the parser's
  /// paren/bracket/brace nesting counts at the time it was seen.
  struct Loc {
    Expr *TemplateName;
    SourceLocation LessLoc;
    AngleBracketTracker::Priority Priority;
    unsigned short ParenCount, BracketCount, BraceCount;
    /// True if the parser is at exactly the nesting depth where this
    /// candidate was recorded.
    bool isActive(Parser &P) const {
      return P.ParenCount == ParenCount && P.BracketCount == BracketCount &&
             P.BraceCount == BraceCount;
    }
    /// True if the parser is at that depth, or nested somewhere inside it.
    bool isActiveOrNested(Parser &P) const {
      return isActive(P) || P.ParenCount > ParenCount ||
             P.BracketCount > BracketCount || P.BraceCount > BraceCount;
    }
  };
  SmallVector<Loc, 8> Locs;
  /// Add an expression that might have been intended to be a template name.
  /// In the case of ambiguity, we arbitrarily select the innermost such
  /// expression, for example in 'foo < bar < baz', 'bar' is the current
  /// candidate. No attempt is made to track that 'foo' is also a candidate
  /// for the case where we see a second suspicious '>' token.
  void add(Parser &P, Expr *TemplateName, SourceLocation LessLoc,
           Priority Prio) {
    if (!Locs.empty() && Locs.back().isActive(P)) {
      if (Locs.back().Priority <= Prio) {
        Locs.back().TemplateName = TemplateName;
        Locs.back().LessLoc = LessLoc;
        Locs.back().Priority = Prio;
      }
    } else {
      Locs.push_back({TemplateName, LessLoc, Prio,
                      P.ParenCount, P.BracketCount, P.BraceCount});
    }
  }
  /// Mark the current potential missing template location as having been
  /// handled (this happens if we pass a "corresponding" '>' or '>>' token
  /// or leave a bracket scope).
  void clear(Parser &P) {
    while (!Locs.empty() && Locs.back().isActiveOrNested(P))
      Locs.pop_back();
  }
  /// Get the current enclosing expression that might have been intended to be
  /// a template name.
  Loc *getCurrent(Parser &P) {
    if (!Locs.empty() && Locs.back().isActive(P))
      return &Locs.back();
    return nullptr;
  }
};
AngleBracketTracker AngleBrackets;
IdentifierInfo *getSEHExceptKeyword();
/// True if we are within an Objective-C container while parsing C-like decls.
///
/// This is necessary because Sema thinks we have left the container
/// to parse the C-like decls, meaning Actions.getObjCDeclContext() will
/// be NULL.
bool ParsingInObjCContainer;
/// Whether to skip parsing of function bodies.
///
/// This option can be used, for example, to speed up searches for
/// declarations/definitions when indexing.
bool SkipFunctionBodies;
/// The location of the expression statement that is being parsed right now.
/// Used to determine if an expression that is being parsed is a statement or
/// just a regular sub-expression.
SourceLocation ExprStatementTokLoc;
/// Flags describing a context in which we're parsing a statement.
enum class ParsedStmtContext {
  /// This context permits declarations in language modes where declarations
  /// are not statements.
  AllowDeclarationsInC = 0x1,
  /// This context permits standalone OpenMP directives.
  AllowStandaloneOpenMPDirectives = 0x2,
  /// This context is at the top level of a GNU statement expression.
  InStmtExpr = 0x4,
  /// The context of a regular substatement.
  SubStmt = 0,
  /// The context of a compound-statement.
  Compound = AllowDeclarationsInC | AllowStandaloneOpenMPDirectives,
  LLVM_MARK_AS_BITMASK_ENUM(InStmtExpr)
};
/// Act on an expression statement that might be the last statement in a
/// GNU statement expression. Checks whether we are actually at the end of
/// a statement expression and builds a suitable expression statement.
StmtResult handleExprStmt(ExprResult E, ParsedStmtContext StmtCtx);
public:
Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies);
~Parser() override;

// Simple accessors for the parser's collaborators and current state.
const LangOptions &getLangOpts() const { return PP.getLangOpts(); }
const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); }
Preprocessor &getPreprocessor() const { return PP; }
Sema &getActions() const { return Actions; }
AttributeFactory &getAttrFactory() { return AttrFactory; }
/// The current lookahead token ("peek token").
const Token &getCurToken() const { return Tok; }
/// The current semantic scope, owned by Sema.
Scope *getCurScope() const { return Actions.getCurScope(); }
void incrementMSManglingNumber() const {
  return Actions.incrementMSManglingNumber();
}
Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); }
// Type forwarding. All of these are statically 'void*', but they may all be
// different actual classes based on the actions in place.
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists;
typedef Sema::FullExprArg FullExprArg;
// Parsing methods.
/// Initialize - Warm up the parser.
///
void Initialize();
/// Parse the first top-level declaration in a translation unit.
bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result);
/// ParseTopLevelDecl - Parse one top-level declaration. Returns true if
/// the EOF was encountered.
bool ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl = false);
/// Convenience overload that discards the parsed declaration group.
bool ParseTopLevelDecl() {
  DeclGroupPtrTy Result;
  return ParseTopLevelDecl(Result);
}
/// ConsumeToken - Consume the current 'peek token' and lex the next one.
/// This does not work with special tokens: string literals, code completion,
/// annotation tokens and balanced tokens must be handled using the specific
/// consume methods.
/// Returns the location of the consumed token.
SourceLocation ConsumeToken() {
  assert(!isTokenSpecial() &&
         "Should consume special tokens with Consume*Token");
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}
/// Consume the current token only if it has kind \p Expected.
/// \returns true (after consuming) when the kinds matched, false otherwise.
bool TryConsumeToken(tok::TokenKind Expected) {
  if (!Tok.is(Expected))
    return false;
  assert(!isTokenSpecial() &&
         "Should consume special tokens with Consume*Token");
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return true;
}
/// Like TryConsumeToken(Expected), but on success also stores the consumed
/// token's location in \p Loc.
bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) {
  if (TryConsumeToken(Expected)) {
    Loc = PrevTokLocation;
    return true;
  }
  return false;
}
/// ConsumeAnyToken - Dispatch to the right Consume* method based on the
/// current token type. This should only be used in cases where the type of
/// the token really isn't known, e.g. in error recovery.
SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) {
  // Note: the checks mirror isTokenSpecial(); plain ConsumeToken() is the
  // fallback for ordinary tokens.
  if (isTokenParen())
    return ConsumeParen();
  if (isTokenBracket())
    return ConsumeBracket();
  if (isTokenBrace())
    return ConsumeBrace();
  if (isTokenStringLiteral())
    return ConsumeStringToken();
  if (Tok.is(tok::code_completion))
    return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken()
                                    : handleUnexpectedCodeCompletionToken();
  if (Tok.isAnnotation())
    return ConsumeAnnotationToken();
  return ConsumeToken();
}
/// Get the source location just past the end of the previously
/// consumed token.
SourceLocation getEndOfPreviousToken() {
  return PP.getLocForEndOfToken(PrevTokLocation);
}
/// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds
/// to the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) {
  return Actions.getNullabilityKeyword(nullability);
}
private:
//===--------------------------------------------------------------------===//
// Low-Level token peeking and consumption methods.
//
/// isTokenParen - Return true if the cur token is '(' or ')'.
bool isTokenParen() const {
  return Tok.isOneOf(tok::l_paren, tok::r_paren);
}
/// isTokenBracket - Return true if the cur token is '[' or ']'.
bool isTokenBracket() const {
  return Tok.isOneOf(tok::l_square, tok::r_square);
}
/// isTokenBrace - Return true if the cur token is '{' or '}'.
bool isTokenBrace() const {
  return Tok.isOneOf(tok::l_brace, tok::r_brace);
}
/// isTokenStringLiteral - True if this token is a string-literal.
bool isTokenStringLiteral() const {
  return tok::isStringLiteral(Tok.getKind());
}
/// isTokenSpecial - True if this token requires special consumption methods.
bool isTokenSpecial() const {
  return isTokenStringLiteral() || isTokenParen() || isTokenBracket() ||
         isTokenBrace() || Tok.is(tok::code_completion) || Tok.isAnnotation();
}
/// Returns true if the current token is '=' or is a type of '='.
/// For typos, give a fixit to '='
bool isTokenEqualOrEqualTypo();
/// Return the current token to the token stream and make the given
/// token the current token.
void UnconsumeToken(Token &Consumed) {
  // Push \p Consumed in front of the old lookahead: re-inject it, lex it
  // back into Tok, then re-inject the saved old lookahead behind it.
  Token Next = Tok;
  PP.EnterToken(Consumed, /*IsReinject*/true);
  PP.Lex(Tok);
  PP.EnterToken(Next, /*IsReinject*/true);
}
/// Consume an annotation token. Returns the annotation's *start* location,
/// while PrevTokLocation is set to the annotation's *end* location.
SourceLocation ConsumeAnnotationToken() {
  assert(Tok.isAnnotation() && "wrong consume method");
  SourceLocation Loc = Tok.getLocation();
  PrevTokLocation = Tok.getAnnotationEndLoc();
  PP.Lex(Tok);
  return Loc;
}
/// ConsumeParen - This consume method keeps the paren count up-to-date.
///
SourceLocation ConsumeParen() {
  assert(isTokenParen() && "wrong consume method");
  if (Tok.getKind() == tok::l_paren)
    ++ParenCount;
  else if (ParenCount) {
    // Leaving a paren scope: drop any pending '<' template-name candidates
    // recorded at or inside this depth.
    AngleBrackets.clear(*this);
    --ParenCount; // Don't let unbalanced )'s drive the count negative.
  }
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}
/// ConsumeBracket - This consume method keeps the bracket count up-to-date.
///
SourceLocation ConsumeBracket() {
  assert(isTokenBracket() && "wrong consume method");
  if (Tok.getKind() == tok::l_square)
    ++BracketCount;
  else if (BracketCount) {
    AngleBrackets.clear(*this);
    --BracketCount; // Don't let unbalanced ]'s drive the count negative.
  }
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}
/// ConsumeBrace - This consume method keeps the brace count up-to-date.
///
SourceLocation ConsumeBrace() {
  assert(isTokenBrace() && "wrong consume method");
  if (Tok.getKind() == tok::l_brace)
    ++BraceCount;
  else if (BraceCount) {
    AngleBrackets.clear(*this);
    --BraceCount; // Don't let unbalanced }'s drive the count negative.
  }
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}
/// ConsumeStringToken - Consume the current 'peek token', lexing a new one
/// and returning the token kind. This method is specific to strings, as it
/// handles string literal concatenation, as per C99 5.1.1.2, translation
/// phase #6.
SourceLocation ConsumeStringToken() {
  assert(isTokenStringLiteral() &&
         "Should only consume string literals with this method");
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}
/// Consume the current code-completion token.
///
/// This routine can be called to consume the code-completion token and
/// continue processing in special cases where \c cutOffParsing() isn't
/// desired, such as token caching or completion with lookahead.
SourceLocation ConsumeCodeCompletionToken() {
  assert(Tok.is(tok::code_completion));
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}
/// \brief When we are consuming a code-completion token without having
/// matched specific position in the grammar, provide code-completion results
/// based on context.
///
/// \returns the source location of the code-completion token.
SourceLocation handleUnexpectedCodeCompletionToken();
/// Abruptly cut off parsing; mainly used when we have reached the
/// code-completion point.
void cutOffParsing() {
  if (PP.isCodeCompletionEnabled())
    PP.setCodeCompletionReached();
  // Cut off parsing by acting as if we reached the end-of-file.
  Tok.setKind(tok::eof);
}
/// Determine if we're at the end of the file or at a transition
/// between modules.
bool isEofOrEom() {
  tok::TokenKind Kind = Tok.getKind();
  return Kind == tok::eof || Kind == tok::annot_module_begin ||
         Kind == tok::annot_module_end || Kind == tok::annot_module_include;
}
/// Checks if the \p Level is valid for use in a fold expression.
bool isFoldOperator(prec::Level Level) const;
/// Checks if the \p Kind is a valid operator for fold expressions.
bool isFoldOperator(tok::TokenKind Kind) const;
/// Initialize all pragma handlers.
void initializePragmaHandlers();
/// Destroy and reset all pragma handlers.
void resetPragmaHandlers();
/// Handle the annotation token produced for #pragma unused(...)
void HandlePragmaUnused();
/// Handle the annotation token produced for
/// #pragma GCC visibility...
void HandlePragmaVisibility();
/// Handle the annotation token produced for
/// #pragma pack...
void HandlePragmaPack();
/// Handle the annotation token produced for
/// #pragma ms_struct...
void HandlePragmaMSStruct();
/// Handle the annotation token produced for
/// #pragma comment...
void HandlePragmaMSComment();
// Handlers for the remaining Microsoft pragma annotation tokens; the
// pragma each one services follows from its name.
void HandlePragmaMSPointersToMembers();
void HandlePragmaMSVtorDisp();
void HandlePragmaMSPragma();
bool HandlePragmaMSSection(StringRef PragmaName,
                           SourceLocation PragmaLocation);
bool HandlePragmaMSSegment(StringRef PragmaName,
                           SourceLocation PragmaLocation);
bool HandlePragmaMSInitSeg(StringRef PragmaName,
                           SourceLocation PragmaLocation);
/// Handle the annotation token produced for
/// #pragma align...
void HandlePragmaAlign();
/// Handle the annotation token produced for
/// #pragma clang __debug dump...
void HandlePragmaDump();
/// Handle the annotation token produced for
/// #pragma weak id...
void HandlePragmaWeak();
/// Handle the annotation token produced for
/// #pragma weak id = id...
void HandlePragmaWeakAlias();
/// Handle the annotation token produced for
/// #pragma redefine_extname...
void HandlePragmaRedefineExtname();
/// Handle the annotation token produced for
/// #pragma STDC FP_CONTRACT...
void HandlePragmaFPContract();
/// Handle the annotation token produced for
/// #pragma STDC FENV_ACCESS...
void HandlePragmaFEnvAccess();
/// \brief Handle the annotation token produced for
/// #pragma clang fp ...
void HandlePragmaFP();
/// Handle the annotation token produced for
/// #pragma OPENCL EXTENSION...
void HandlePragmaOpenCLExtension();
/// Handle the annotation token produced for
/// #pragma clang __debug captured
StmtResult HandlePragmaCaptured();
/// Handle the annotation token produced for
/// #pragma clang loop and #pragma unroll.
bool HandlePragmaLoopHint(LoopHint &Hint);
bool ParsePragmaAttributeSubjectMatchRuleSet(
    attr::ParsedSubjectMatchRuleSet &SubjectMatchRules,
    SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc);
/// Handle the annotation token produced for #pragma clang attribute.
void HandlePragmaAttribute();
/// GetLookAheadToken - This peeks ahead N tokens and returns that token
/// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1)
/// returns the token after Tok, etc.
///
/// Note that this differs from the Preprocessor's LookAhead method, because
/// the Parser always has one token lexed that the preprocessor doesn't.
///
const Token &GetLookAheadToken(unsigned N) {
  if (N == 0 || Tok.is(tok::eof)) return Tok;
  return PP.LookAhead(N-1);
}
public:
/// NextToken - This peeks ahead one token and returns it without
/// consuming it.
const Token &NextToken() {
  return PP.LookAhead(0);
}
/// getTypeAnnotation - Read a parsed type out of an annotation token.
static ParsedType getTypeAnnotation(const Token &Tok) {
  return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue());
}
private:
// The following static helpers read/write the opaque payload of an
// annotation token under a specific type interpretation; the caller must
// know which kind of annotation the token holds.
static void setTypeAnnotation(Token &Tok, ParsedType T) {
  Tok.setAnnotationValue(T.getAsOpaquePtr());
}
/// Read a named declaration out of an annotation token.
static NamedDecl *getNonTypeAnnotation(const Token &Tok) {
  return static_cast<NamedDecl*>(Tok.getAnnotationValue());
}
static void setNonTypeAnnotation(Token &Tok, NamedDecl *ND) {
  Tok.setAnnotationValue(ND);
}
/// Read an identifier out of an annotation token.
static IdentifierInfo *getIdentifierAnnotation(const Token &Tok) {
  return static_cast<IdentifierInfo*>(Tok.getAnnotationValue());
}
// NOTE(review): the parameter is an IdentifierInfo but is named 'ND',
// mirroring setNonTypeAnnotation above; harmless misnomer.
static void setIdentifierAnnotation(Token &Tok, IdentifierInfo *ND) {
  Tok.setAnnotationValue(ND);
}
/// Read an already-translated primary expression out of an annotation
/// token.
static ExprResult getExprAnnotation(const Token &Tok) {
  return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue());
}
/// Set the primary expression corresponding to the given annotation
/// token.
static void setExprAnnotation(Token &Tok, ExprResult ER) {
  Tok.setAnnotationValue(ER.getAsOpaquePointer());
}
public:
// If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to
// find a type name by attempting typo correction.
bool TryAnnotateTypeOrScopeToken();
bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS,
                                               bool IsNewScope);
bool TryAnnotateCXXScopeToken(bool EnteringContext = false);
private:
/// Result of an attempt to annotate a name (see TryAnnotateName).
enum AnnotatedNameKind {
  /// Annotation has failed and emitted an error.
  ANK_Error,
  /// The identifier is a tentatively-declared name.
  ANK_TentativeDecl,
  /// The identifier is a template name. FIXME: Add an annotation for that.
  ANK_TemplateName,
  /// The identifier can't be resolved.
  ANK_Unresolved,
  /// Annotation was successful.
  ANK_Success
};
AnnotatedNameKind TryAnnotateName(CorrectionCandidateCallback *CCC = nullptr);
/// Push a tok::annot_cxxscope token onto the token stream.
void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation);
/// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens,
/// replacing them with the non-context-sensitive keywords. This returns
/// true if the token was replaced.
bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc,
                     const char *&PrevSpec, unsigned &DiagID,
                     bool &isInvalid) {
  if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
    return false;
  // Only 'vector', 'bool', and (AltiVec-only) 'pixel' are candidates.
  if (Tok.getIdentifierInfo() != Ident_vector &&
      Tok.getIdentifierInfo() != Ident_bool &&
      (!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel))
    return false;
  return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid);
}
/// Check for the context-sensitive AltiVec 'vector' identifier token and,
/// when applicable, replace it with the non-context-sensitive __vector.
/// \returns true if the token was replaced.
bool TryAltiVecVectorToken() {
  // Only relevant in AltiVec / ZVector language modes.
  if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
    return false;
  if (Tok.getIdentifierInfo() != Ident_vector)
    return false;
  return TryAltiVecVectorTokenOutOfLine();
}
bool TryAltiVecVectorTokenOutOfLine();
bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
                              const char *&PrevSpec, unsigned &DiagID,
                              bool &isInvalid);
/// Returns true if the current token is the identifier 'instancetype'.
///
/// Should only be used in Objective-C language modes.
bool isObjCInstancetype() {
  assert(getLangOpts().ObjC);
  if (Tok.isAnnotation())
    return false;
  // Lazily look up (and cache) the 'instancetype' identifier.
  if (!Ident_instancetype)
    Ident_instancetype = PP.getIdentifierInfo("instancetype");
  return Tok.getIdentifierInfo() == Ident_instancetype;
}
/// TryKeywordIdentFallback - For compatibility with system headers using
/// keywords as identifiers, attempt to convert the current token to an
/// identifier and optionally disable the keyword for the remainder of the
/// translation unit. This returns false if the token was not replaced,
/// otherwise emits a diagnostic and returns true.
bool TryKeywordIdentFallback(bool DisableKeyword);
/// Get the TemplateIdAnnotation from the token.
TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok);
/// TentativeParsingAction - An object that is used as a kind of "tentative
/// parsing transaction". It gets instantiated to mark the token position and
/// after the token consumption is done, Commit() or Revert() is called to
/// either "commit the consumed tokens" or revert to the previously marked
/// token position. Example:
///
///   TentativeParsingAction TPA(*this);
///   ConsumeToken();
///   ....
///   TPA.Revert();
///
class TentativeParsingAction {
  Parser &P;
  // Snapshot of the parser state taken at construction; restored by Revert().
  PreferredTypeBuilder PrevPreferredType;
  Token PrevTok;
  size_t PrevTentativelyDeclaredIdentifierCount;
  unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount;
  bool isActive;
public:
  explicit TentativeParsingAction(Parser& p) : P(p) {
    PrevPreferredType = P.PreferredType;
    PrevTok = P.Tok;
    PrevTentativelyDeclaredIdentifierCount =
        P.TentativelyDeclaredIdentifiers.size();
    PrevParenCount = P.ParenCount;
    PrevBracketCount = P.BracketCount;
    PrevBraceCount = P.BraceCount;
    // Pairs with CommitBacktrackedTokens()/Backtrack() below.
    P.PP.EnableBacktrackAtThisPos();
    isActive = true;
  }
  void Commit() {
    assert(isActive && "Parsing action was finished!");
    P.TentativelyDeclaredIdentifiers.resize(
        PrevTentativelyDeclaredIdentifierCount);
    P.PP.CommitBacktrackedTokens();
    isActive = false;
  }
  void Revert() {
    assert(isActive && "Parsing action was finished!");
    P.PP.Backtrack();
    P.PreferredType = PrevPreferredType;
    P.Tok = PrevTok;
    P.TentativelyDeclaredIdentifiers.resize(
        PrevTentativelyDeclaredIdentifierCount);
    P.ParenCount = PrevParenCount;
    P.BracketCount = PrevBracketCount;
    P.BraceCount = PrevBraceCount;
    isActive = false;
  }
  ~TentativeParsingAction() {
    assert(!isActive && "Forgot to call Commit or Revert!");
  }
};
/// A TentativeParsingAction that automatically reverts in its destructor.
/// Useful for disambiguation parses that will always be reverted.
class RevertingTentativeParsingAction
    : private Parser::TentativeParsingAction {
public:
  RevertingTentativeParsingAction(Parser &P)
      : Parser::TentativeParsingAction(P) {}
  ~RevertingTentativeParsingAction() { Revert(); }
};
class UnannotatedTentativeParsingAction;
/// ObjCDeclContextSwitch - An object used to switch context from
/// an objective-c decl context to its enclosing decl context and
/// back.
class ObjCDeclContextSwitch {
  Parser &P;
  Decl *DC; // The ObjC container we temporarily exited, or null.
  SaveAndRestore<bool> WithinObjCContainer;
public:
  explicit ObjCDeclContextSwitch(Parser &p)
    : P(p), DC(p.getObjCDeclContext()),
      WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) {
    // Temporarily leave the container on construction...
    if (DC)
      P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC));
  }
  ~ObjCDeclContextSwitch() {
    // ...and re-enter it on destruction.
    if (DC)
      P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC));
  }
};
/// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the
/// input. If so, it is consumed and false is returned.
///
/// If a trivial punctuator misspelling is encountered, a FixIt error
/// diagnostic is issued and false is returned after recovery.
///
/// If the input is malformed, this emits the specified diagnostic and true is
/// returned.
bool ExpectAndConsume(tok::TokenKind ExpectedTok,
                      unsigned Diag = diag::err_expected,
                      StringRef DiagMsg = "");
/// The parser expects a semicolon and, if present, will consume it.
///
/// If the next token is not a semicolon, this emits the specified diagnostic,
/// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior
/// to the semicolon, consumes that extra token.
bool ExpectAndConsumeSemi(unsigned DiagID);
/// The kind of extra semi diagnostic to emit.
enum ExtraSemiKind {
  OutsideFunction = 0,
  InsideStruct = 1,
  InstanceVariableList = 2,
  AfterMemberFunctionDefinition = 3
};
/// Consume any extra semi-colons until the end of the line.
void ConsumeExtraSemi(ExtraSemiKind Kind, DeclSpec::TST T = TST_unspecified);
/// Return false if the next token is an identifier. An 'expected identifier'
/// error is emitted otherwise.
///
/// The parser tries to recover from the error by checking if the next token
/// is a C++ keyword when parsing Objective-C++. Return false if the recovery
/// was successful.
bool expectIdentifier();
public:
//===--------------------------------------------------------------------===//
// Scope manipulation
/// ParseScope - Introduces a new scope for parsing. The kind of
/// scope is determined by ScopeFlags. Objects of this type should
/// be created on the stack to coincide with the position where the
/// parser enters the new scope, and this object's constructor will
/// create that new scope. Similarly, once the object is destroyed
/// the parser will exit the scope.
class ParseScope {
  Parser *Self; // Null once the scope has been exited (or was never entered).
  ParseScope(const ParseScope &) = delete;
  void operator=(const ParseScope &) = delete;
public:
  // Construct a new object to manage a scope in the parser Self where the
  // new Scope is created with the flags ScopeFlags, but only when we aren't
  // about to enter a compound statement.
  ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true,
             bool BeforeCompoundStmt = false)
      : Self(Self) {
    if (!EnteredScope || BeforeCompoundStmt) {
      // No scope is entered here; a compound statement still bumps the
      // MS mangling number.
      if (BeforeCompoundStmt)
        Self->incrementMSManglingNumber();
      this->Self = nullptr;
    } else {
      Self->EnterScope(ScopeFlags);
    }
  }

  // Exit the scope associated with this object now, rather than waiting
  // until the object is destroyed.
  void Exit() {
    if (!Self)
      return;
    Self->ExitScope();
    Self = nullptr;
  }

  ~ParseScope() { Exit(); }
};
/// EnterScope - Start a new scope.
void EnterScope(unsigned ScopeFlags);
/// ExitScope - Pop a scope off the scope stack.
void ExitScope();
private:
/// RAII object used to modify the scope flags for the current scope.
class ParseScopeFlags {
  Scope *CurScope;      // The scope whose flags are temporarily modified.
  unsigned OldFlags;    // Original flags, restored by the destructor.
  ParseScopeFlags(const ParseScopeFlags &) = delete;
  void operator=(const ParseScopeFlags &) = delete;
public:
  ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true);
  ~ParseScopeFlags();
};
//===--------------------------------------------------------------------===//
// Diagnostic Emission and Error recovery.
public:
DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID);
/// Emit \p DiagID at the current token's location.
DiagnosticBuilder Diag(unsigned DiagID) {
  return Diag(Tok, DiagID);
}
private:
void SuggestParentheses(SourceLocation Loc, unsigned DK,
                        SourceRange ParenRange);
void CheckNestedObjCContexts(SourceLocation AtLoc);
public:
/// Control flags for SkipUntil functions.
enum SkipUntilFlags {
  StopAtSemi = 1 << 0, ///< Stop skipping at semicolon
  /// Stop skipping at specified token, but don't skip the token itself
  StopBeforeMatch = 1 << 1,
  StopAtCodeCompletion = 1 << 2 ///< Stop at code completion
};
friend constexpr SkipUntilFlags operator|(SkipUntilFlags L,
                                          SkipUntilFlags R) {
  return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) |
                                     static_cast<unsigned>(R));
}
/// SkipUntil - Read tokens until we get to the specified token, then consume
/// it (unless StopBeforeMatch is specified). Because we cannot guarantee
/// that the token will ever occur, this skips to the next token, or to some
/// likely good stopping point. If Flags has StopAtSemi flag, skipping will
/// stop at a ';' character.
///
/// If SkipUntil finds the specified token, it returns true, otherwise it
/// returns false.
bool SkipUntil(tok::TokenKind T,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  return SkipUntil(llvm::makeArrayRef(T), Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  tok::TokenKind TokArray[] = {T1, T2};
  return SkipUntil(TokArray, Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  tok::TokenKind TokArray[] = {T1, T2, T3};
  return SkipUntil(TokArray, Flags);
}
bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));
/// SkipMalformedDecl - Read tokens until we get to some likely good stopping
/// point for skipping past a simple-declaration.
void SkipMalformedDecl();
/// The location of the first statement inside an else that might
/// have a misleading indentation. If there is no
/// MisleadingIndentationChecker on an else active, this location is invalid.
SourceLocation MisleadingIndentationElseLoc;
private:
//===--------------------------------------------------------------------===//
// Lexing and parsing of C++ inline methods.
struct ParsingClass;
/// [class.mem]p1: "... the class is regarded as complete within
/// - function bodies
/// - default arguments
/// - exception-specifications (TODO: C++0x)
/// - and brace-or-equal-initializers for non-static data members
/// (including such things in nested classes)."
/// LateParsedDeclarations build the tree of those elements so they can
/// be parsed after parsing the top-level class.
class LateParsedDeclaration {
public:
virtual ~LateParsedDeclaration();
virtual void ParseLexedMethodDeclarations();
virtual void ParseLexedMemberInitializers();
virtual void ParseLexedMethodDefs();
virtual void ParseLexedAttributes();
};
/// Inner node of the LateParsedDeclaration tree that parses
/// all its members recursively.
class LateParsedClass : public LateParsedDeclaration {
public:
LateParsedClass(Parser *P, ParsingClass *C);
~LateParsedClass() override;
void ParseLexedMethodDeclarations() override;
void ParseLexedMemberInitializers() override;
void ParseLexedMethodDefs() override;
void ParseLexedAttributes() override;
private:
Parser *Self;
ParsingClass *Class;
};
/// Contains the lexed tokens of an attribute with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other member
/// member declarations.
/// FIXME: Perhaps we should change the name of LateParsedDeclaration to
/// LateParsedTokens.
struct LateParsedAttribute : public LateParsedDeclaration {
Parser *Self;
CachedTokens Toks;
IdentifierInfo &AttrName;
IdentifierInfo *MacroII = nullptr;
SourceLocation AttrNameLoc;
SmallVector<Decl*, 2> Decls;
explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
SourceLocation Loc)
: Self(P), AttrName(Name), AttrNameLoc(Loc) {}
void ParseLexedAttributes() override;
void addDecl(Decl *D) { Decls.push_back(D); }
};
// A list of late-parsed attributes. Used by ParseGNUAttributes.
class LateParsedAttrList: public SmallVector<LateParsedAttribute *, 2> {
public:
LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { }
bool parseSoon() { return ParseSoon; }
private:
bool ParseSoon; // Are we planning to parse these shortly after creation?
};
/// Contains the lexed tokens of a member function definition
/// which needs to be parsed at the end of the class declaration
/// after parsing all other member declarations.
struct LexedMethod : public LateParsedDeclaration {
// Parser to re-enter when the cached body tokens are parsed.
Parser *Self;
// The function (or function template) whose body is cached in Toks.
Decl *D;
// The cached tokens of the function body.
CachedTokens Toks;
/// Whether this member function had an associated template
/// scope. When true, D is a template declaration.
/// otherwise, it is a member function declaration.
bool TemplateScope;
explicit LexedMethod(Parser* P, Decl *MD)
: Self(P), D(MD), TemplateScope(false) {}
void ParseLexedMethodDefs() override;
};
/// LateParsedDefaultArgument - Keeps track of a parameter that may
/// have a default argument that cannot be parsed yet because it
/// occurs within a member function declaration inside the class
/// (C++ [class.mem]p2).
struct LateParsedDefaultArgument {
explicit LateParsedDefaultArgument(Decl *P,
std::unique_ptr<CachedTokens> Toks = nullptr)
: Param(P), Toks(std::move(Toks)) { }
/// Param - The parameter declaration for this parameter.
Decl *Param;
/// Toks - The sequence of tokens that comprises the default
/// argument expression, not including the '=' or the terminating
/// ')' or ','. This will be NULL for parameters that have no
/// default argument. Owned by this struct (unique_ptr).
std::unique_ptr<CachedTokens> Toks;
};
/// LateParsedMethodDeclaration - A method declaration inside a class that
/// contains at least one entity whose parsing needs to be delayed
/// until the class itself is completely-defined, such as a default
/// argument (C++ [class.mem]p2).
struct LateParsedMethodDeclaration : public LateParsedDeclaration {
explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
: Self(P), Method(M), TemplateScope(false),
ExceptionSpecTokens(nullptr) {}
void ParseLexedMethodDeclarations() override;
// Parser to re-enter when the delayed pieces are parsed.
Parser* Self;
/// Method - The method declaration.
Decl *Method;
/// Whether this member function had an associated template
/// scope. When true, Method is a template declaration;
/// otherwise, it is a member function declaration.
bool TemplateScope;
/// DefaultArgs - Contains the parameters of the function and
/// their default arguments. At least one of the parameters will
/// have a default argument, but all of the parameters of the
/// method will be stored so that they can be reintroduced into
/// scope at the appropriate times.
SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;
/// The set of tokens that make up an exception-specification that
/// has not yet been parsed.
CachedTokens *ExceptionSpecTokens;
};
/// LateParsedMemberInitializer - An initializer for a non-static class data
/// member whose parsing must be delayed until the class is completely
/// defined (C++11 [class.mem]p2).
struct LateParsedMemberInitializer : public LateParsedDeclaration {
LateParsedMemberInitializer(Parser *P, Decl *FD)
: Self(P), Field(FD) { }
void ParseLexedMemberInitializers() override;
// Parser to re-enter when the initializer tokens are parsed.
Parser *Self;
/// Field - The field declaration.
Decl *Field;
/// CachedTokens - The sequence of tokens that comprises the initializer,
/// including any leading '='.
CachedTokens Toks;
};
/// LateParsedDeclarationsContainer - During parsing of a top (non-nested)
/// C++ class, its method declarations that contain parts that won't be
/// parsed until after the definition is completed (C++ [class.mem]p2),
/// the method declarations and possibly attached inline definitions
/// will be stored here with the tokens that will be parsed to create those
/// entities.
typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer;
/// Representation of a class that has been parsed, including
/// any member function declarations or definitions that need to be
/// parsed after the corresponding top-level class is complete.
struct ParsingClass {
ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
: TopLevelClass(TopLevelClass), TemplateScope(false),
IsInterface(IsInterface), TagOrTemplate(TagOrTemplate) { }
/// Whether this is a "top-level" class, meaning that it is
/// not nested within another class.
bool TopLevelClass : 1;
/// Whether this class had an associated template
/// scope. When true, TagOrTemplate is a template declaration;
/// otherwise, it is a tag declaration.
bool TemplateScope : 1;
/// Whether this class is an __interface.
bool IsInterface : 1;
/// The class or class template whose definition we are parsing.
Decl *TagOrTemplate;
/// LateParsedDeclarations - Method declarations, inline definitions and
/// nested classes that contain pieces whose parsing will be delayed until
/// the top-level class is fully defined.
LateParsedDeclarationsContainer LateParsedDeclarations;
};
/// The stack of classes that is currently being
/// parsed. Nested and local classes will be pushed onto this stack
/// when they are parsed, and removed afterward.
std::stack<ParsingClass *> ClassStack;
/// Returns the innermost class currently being parsed.
/// Precondition (asserted): at least one class is on ClassStack.
ParsingClass &getCurrentClass() {
assert(!ClassStack.empty() && "No lexed method stacks!");
return *ClassStack.top();
}
/// RAII object used to manage the parsing of a class definition.
/// Pushes a parsing-class record in the constructor and guarantees the
/// matching pop runs exactly once (either via Pop() or the destructor).
class ParsingClassDefinition {
Parser &P;
// True once Pop() has been called; guards against a double pop.
bool Popped;
Sema::ParsingClassState State;
public:
ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
bool IsInterface)
: P(P), Popped(false),
State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {
}
/// Pop this class of the stack.
void Pop() {
assert(!Popped && "Nested class has already been popped");
Popped = true;
P.PopParsingClass(State);
}
~ParsingClassDefinition() {
// Pop implicitly if the caller never popped explicitly.
if (!Popped)
P.PopParsingClass(State);
}
};
/// Contains information about any template-specific
/// information that has been parsed prior to parsing declaration
/// specifiers.
struct ParsedTemplateInfo {
// Default: not a template at all.
ParsedTemplateInfo()
: Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc() { }
// Template declaration or explicit specialization, depending on
// isSpecialization.
ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
bool isSpecialization,
bool lastParameterListWasEmpty = false)
: Kind(isSpecialization? ExplicitSpecialization : Template),
TemplateParams(TemplateParams),
LastParameterListWasEmpty(lastParameterListWasEmpty) { }
// Explicit instantiation ("[extern] template ...").
explicit ParsedTemplateInfo(SourceLocation ExternLoc,
SourceLocation TemplateLoc)
: Kind(ExplicitInstantiation), TemplateParams(nullptr),
ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
LastParameterListWasEmpty(false){ }
/// The kind of template we are parsing.
enum {
/// We are not parsing a template at all.
NonTemplate = 0,
/// We are parsing a template declaration.
Template,
/// We are parsing an explicit specialization.
ExplicitSpecialization,
/// We are parsing an explicit instantiation.
ExplicitInstantiation
} Kind;
/// The template parameter lists, for template declarations
/// and explicit specializations.
TemplateParameterLists *TemplateParams;
/// The location of the 'extern' keyword, if any, for an explicit
/// instantiation
SourceLocation ExternLoc;
/// The location of the 'template' keyword, for an explicit
/// instantiation.
SourceLocation TemplateLoc;
/// Whether the last template parameter list was empty.
bool LastParameterListWasEmpty;
SourceRange getSourceRange() const LLVM_READONLY;
};
// Late parsing of templated functions: cache the body tokens, then
// parse them on demand via the Sema callbacks below.
void LexTemplateFunctionForLateParsing(CachedTokens &Toks);
void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT);
// Static trampolines registered with Sema; P is the Parser instance.
static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT);
static void LateTemplateParserCleanupCallback(void *P);
Sema::ParsingClassState
PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface);
void DeallocateParsedClasses(ParsingClass *Class);
void PopParsingClass(Sema::ParsingClassState);
/// Kind of cached initializer being consumed/stored.
enum CachedInitKind {
CIK_DefaultArgument,
CIK_DefaultInitializer
};
NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS,
ParsedAttributes &AccessAttrs,
ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo,
const VirtSpecifiers &VS,
SourceLocation PureSpecLoc);
void ParseCXXNonStaticMemberInitializer(Decl *VarD);
// Re-parsing of cached (late-parsed) tokens: the plural forms walk a
// whole ParsingClass, the singular forms handle one cached entity.
void ParseLexedAttributes(ParsingClass &Class);
void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
bool EnterScope, bool OnDefinition);
void ParseLexedAttribute(LateParsedAttribute &LA,
bool EnterScope, bool OnDefinition);
void ParseLexedMethodDeclarations(ParsingClass &Class);
void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM);
void ParseLexedMethodDefs(ParsingClass &Class);
void ParseLexedMethodDef(LexedMethod &LM);
void ParseLexedMemberInitializers(ParsingClass &Class);
void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI);
void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod);
bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks);
bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK);
bool ConsumeAndStoreConditional(CachedTokens &Toks);
// Convenience overload: stop at a single token kind. Forwards to the
// two-kind overload with T1 used for both stop tokens.
bool ConsumeAndStoreUntil(tok::TokenKind T1,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true) {
return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken);
}
bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true);
//===--------------------------------------------------------------------===//
// C99 6.9: External Definitions.
/// ParsedAttributes plus the source range the attributes cover.
struct ParsedAttributesWithRange : ParsedAttributes {
ParsedAttributesWithRange(AttributeFactory &factory)
: ParsedAttributes(factory) {}
// Clears both the attribute list and the associated range.
void clear() {
ParsedAttributes::clear();
Range = SourceRange();
}
SourceRange Range;
};
/// Non-owning view variant of ParsedAttributesWithRange.
struct ParsedAttributesViewWithRange : ParsedAttributesView {
ParsedAttributesViewWithRange() : ParsedAttributesView() {}
// Clears only the list view, plus the associated range.
void clearListOnly() {
ParsedAttributesView::clearListOnly();
Range = SourceRange();
}
SourceRange Range;
};
// External-definition parsing entry points (C99 6.9).
DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr);
bool isDeclarationAfterDeclarator();
bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator);
DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(
ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr,
AccessSpecifier AS = AS_none);
DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
ParsingDeclSpec &DS,
AccessSpecifier AS);
void SkipFunctionBody();
Decl *ParseFunctionDefinition(ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
LateParsedAttrList *LateParsedAttrs = nullptr);
void ParseKNRParamDeclarations(Declarator &D);
// EndLoc, if non-NULL, is filled with the location of the last token of
// the simple-asm.
ExprResult ParseSimpleAsm(SourceLocation *EndLoc = nullptr);
ExprResult ParseAsmStringLiteral();
// Objective-C External Declarations
void MaybeSkipAttributes(tok::ObjCKeywordKind Kind);
DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs);
DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc);
Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
ParsedAttributes &prefixAttrs);
class ObjCTypeParamListScope;
ObjCTypeParamList *parseObjCTypeParamList();
ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs(
ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc,
SmallVectorImpl<IdentifierLocPair> &protocolIdents,
SourceLocation &rAngleLoc, bool mayBeProtocolList = true);
void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc,
BalancedDelimiterTracker &T,
SmallVectorImpl<Decl *> &AllIvarDecls,
bool RBraceMissing);
void ParseObjCClassInstanceVariables(Decl *interfaceDecl,
tok::ObjCKeywordKind visibility,
SourceLocation atLoc);
bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P,
SmallVectorImpl<SourceLocation> &PLocs,
bool WarnOnDeclarations,
bool ForObjCContainer,
SourceLocation &LAngleLoc,
SourceLocation &EndProtoLoc,
bool consumeLastToken);
/// Parse the first angle-bracket-delimited clause for an
/// Objective-C object or object pointer type, which may be either
/// type arguments or protocol qualifiers.
void parseObjCTypeArgsOrProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken,
bool warnOnIncompleteProtocols);
/// Parse either Objective-C type arguments or protocol qualifiers; if the
/// former, also parse protocol qualifiers afterward.
void parseObjCTypeArgsAndProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken);
/// Parse a protocol qualifier type such as '<NSCopying>', which is
/// an anachronistic way of writing 'id<NSCopying>'.
TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc);
/// Parse Objective-C type arguments and protocol qualifiers, extending the
/// current type with the parsed result.
TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc,
ParsedType type,
bool consumeLastToken,
SourceLocation &endLoc);
void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
Decl *CDecl);
DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc,
ParsedAttributes &prefixAttrs);
/// RAII state for parsing an Objective-C @implementation; registers
/// itself as the parser's current implementation scope on construction
/// and collects late-parsed method bodies (see LateParsedObjCMethods).
struct ObjCImplParsingDataRAII {
  Parser &P;
  Decl *Dcl;
  bool HasCFunction;
  typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer;
  LateParsedObjCMethodContainer LateParsedObjCMethods;

  // Initialize all scalar members in the mem-initializer list (the
  // original assigned Finished in the body), then register this scope
  // with the parser.
  ObjCImplParsingDataRAII(Parser &parser, Decl *D)
      : P(parser), Dcl(D), HasCFunction(false), Finished(false) {
    P.CurParsedObjCImpl = this;
  }
  ~ObjCImplParsingDataRAII();

  void finish(SourceRange AtEnd);
  bool isFinished() const { return Finished; }

private:
  /// Set by finish(); queried through isFinished().
  bool Finished;
};
/// The currently active @implementation scope, if any (non-owning).
ObjCImplParsingDataRAII *CurParsedObjCImpl;
void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl);
DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc,
ParsedAttributes &Attrs);
DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd);
Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc);
Decl *ParseObjCPropertySynthesize(SourceLocation atLoc);
Decl *ParseObjCPropertyDynamic(SourceLocation atLoc);
IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation);
// Definitions for Objective-C context-sensitive keyword recognition.
enum ObjCTypeQual {
objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref,
objc_nonnull, objc_nullable, objc_null_unspecified,
objc_NumQuals
};
// One IdentifierInfo per ObjCTypeQual value, indexed by the enum.
IdentifierInfo *ObjCTypeQuals[objc_NumQuals];
bool isTokIdentifier_in() const;
ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx,
ParsedAttributes *ParamAttrs);
void ParseObjCMethodRequirement();
Decl *ParseObjCMethodPrototype(
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition = true);
Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType,
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition=true);
void ParseObjCPropertyAttribute(ObjCDeclSpec &DS);
Decl *ParseObjCMethodDefinition();
public:
//===--------------------------------------------------------------------===//
// C99 6.5: Expressions.
/// TypeCastState - State whether an expression is or may be a type cast.
enum TypeCastState {
NotTypeCast = 0,
MaybeTypeCast,
IsTypeCast
};
ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpressionInExprEvalContext(
TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseCaseExpression(SourceLocation CaseLoc);
ExprResult ParseConstraintExpression();
// Expr that doesn't include commas.
ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
unsigned &NumLineToksConsumed,
bool IsUnevaluated);
private:
ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc);
ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc);
ExprResult ParseRHSOfBinaryExpression(ExprResult LHS,
prec::Level MinPrec);
// Two overloads: the first reports via NotCastExpr whether the tokens
// failed to form a cast-expression; the second drops that out-param.
ExprResult ParseCastExpression(bool isUnaryExpression,
bool isAddressOfOperand,
bool &NotCastExpr,
TypeCastState isTypeCast,
bool isVectorLiteral = false);
ExprResult ParseCastExpression(bool isUnaryExpression,
bool isAddressOfOperand = false,
TypeCastState isTypeCast = NotTypeCast,
bool isVectorLiteral = false);
/// Returns true if the next token cannot start an expression.
bool isNotExpressionStart();
/// Returns true if the next token would start a postfix-expression
/// suffix.
bool isPostfixExpressionSuffixStart() {
tok::TokenKind K = Tok.getKind();
return (K == tok::l_square || K == tok::l_paren ||
K == tok::period || K == tok::arrow ||
K == tok::plusplus || K == tok::minusminus);
}
bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less);
void checkPotentialAngleBracket(ExprResult &PotentialTemplateName);
// Out-of-line overload taking the tracked angle-bracket location.
bool checkPotentialAngleBracketDelimiter(const AngleBracketTracker::Loc &,
const Token &OpToken);
bool checkPotentialAngleBracketDelimiter(const Token &OpToken) {
  // Only delegate when an angle bracket is currently being tracked;
  // otherwise there is nothing to check.
  auto *Info = AngleBrackets.getCurrent(*this);
  if (!Info)
    return false;
  return checkPotentialAngleBracketDelimiter(*Info, OpToken);
}
ExprResult ParsePostfixExpressionSuffix(ExprResult LHS);
ExprResult ParseUnaryExprOrTypeTraitExpression();
ExprResult ParseBuiltinPrimaryExpression();
ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
bool &isCastExpr,
ParsedType &CastTy,
SourceRange &CastRange);
// Stack-size-20 scratch vectors for expression/comma-location lists.
typedef SmallVector<Expr*, 20> ExprListTy;
typedef SmallVector<SourceLocation, 20> CommaLocsTy;
/// ParseExpressionList - Used for C/C++ (argument-)expression-list.
bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs,
llvm::function_ref<void()> ExpressionStarts =
llvm::function_ref<void()>());
/// ParseSimpleExpressionList - A simple comma-separated list of expressions,
/// used for misc language extensions.
bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs);
/// ParenParseOption - Control what ParseParenExpression will parse.
/// Each enumerator is a superset of the previous one.
enum ParenParseOption {
SimpleExpr, // Only parse '(' expression ')'
FoldExpr, // Also allow fold-expression <anything>
CompoundStmt, // Also allow '(' compound-statement ')'
CompoundLiteral, // Also allow '(' type-name ')' '{' ... '}'
CastExpr // Also allow '(' type-name ')' <anything>
};
ExprResult ParseParenExpression(ParenParseOption &ExprType,
bool stopIfCastExpr,
bool isTypeCast,
ParsedType &CastTy,
SourceLocation &RParenLoc);
ExprResult ParseCXXAmbiguousParenExpression(
ParenParseOption &ExprType, ParsedType &CastTy,
BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt);
ExprResult ParseCompoundLiteralExpression(ParsedType Ty,
SourceLocation LParenLoc,
SourceLocation RParenLoc);
ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false);
ExprResult ParseGenericSelectionExpression();
ExprResult ParseObjCBoolLiteral();
ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T);
//===--------------------------------------------------------------------===//
// C++ Expressions
ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand,
Token &Replacement);
ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false);
bool areTokensAdjacent(const Token &A, const Token &B);
void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr,
bool EnteringContext, IdentifierInfo &II,
CXXScopeSpec &SS);
bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS, ParsedType ObjectType,
bool EnteringContext,
bool *MayBePseudoDestructor = nullptr,
bool IsTypename = false,
IdentifierInfo **LastII = nullptr,
bool OnlyNamespace = false,
bool InUsingDeclaration = false);
//===--------------------------------------------------------------------===//
// C++11 5.1.2: Lambda expressions
/// Result of tentatively parsing a lambda-introducer.
enum class LambdaIntroducerTentativeParse {
/// This appears to be a lambda-introducer, which has been fully parsed.
Success,
/// This is a lambda-introducer, but has not been fully parsed, and this
/// function needs to be called again to parse it.
Incomplete,
/// This is definitely an Objective-C message send expression, rather than
/// a lambda-introducer, attribute-specifier, or array designator.
MessageSend,
/// This is not a lambda-introducer.
Invalid,
};
// [...] () -> type {...}
ExprResult ParseLambdaExpression();
ExprResult TryParseLambdaExpression();
bool
ParseLambdaIntroducer(LambdaIntroducer &Intro,
LambdaIntroducerTentativeParse *Tentative = nullptr);
ExprResult ParseLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro);
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Casts
ExprResult ParseCXXCasts();
/// Parse a __builtin_bit_cast(T, E), used to implement C++2a std::bit_cast.
ExprResult ParseBuiltinBitCast();
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Type Identification
ExprResult ParseCXXTypeid();
//===--------------------------------------------------------------------===//
// C++ : Microsoft __uuidof Expression
ExprResult ParseCXXUuidof();
//===--------------------------------------------------------------------===//
// C++ 5.2.4: C++ Pseudo-Destructor Expressions
ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
ParsedType ObjectType);
//===--------------------------------------------------------------------===//
// C++ 9.3.2: C++ 'this' pointer
ExprResult ParseCXXThis();
//===--------------------------------------------------------------------===//
// C++ 15: C++ Throw Expression
ExprResult ParseThrowExpression();
ExceptionSpecificationType tryParseExceptionSpecification(
bool Delayed,
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &DynamicExceptions,
SmallVectorImpl<SourceRange> &DynamicExceptionRanges,
ExprResult &NoexceptExpr,
CachedTokens *&ExceptionSpecTokens);
// EndLoc is filled with the location of the last token of the specification.
ExceptionSpecificationType ParseDynamicExceptionSpecification(
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &Exceptions,
SmallVectorImpl<SourceRange> &Ranges);
//===--------------------------------------------------------------------===//
// C++0x 8: Function declaration trailing-return-type
TypeResult ParseTrailingReturnType(SourceRange &Range,
bool MayBeFollowedByDirectInit);
//===--------------------------------------------------------------------===//
// C++ 2.13.5: C++ Boolean Literals
ExprResult ParseCXXBoolLiteral();
//===--------------------------------------------------------------------===//
// C++ 5.2.3: Explicit type conversion (functional notation)
ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS);
/// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers.
/// This should only be called when the current token is known to be part of
/// simple-type-specifier.
void ParseCXXSimpleTypeSpecifier(DeclSpec &DS);
bool ParseCXXTypeSpecifierSeq(DeclSpec &DS);
//===--------------------------------------------------------------------===//
// C++ 5.3.4 and 5.3.5: C++ new and delete
bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs,
Declarator &D);
void ParseDirectNewDeclarator(Declarator &D);
ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start);
ExprResult ParseCXXDeleteExpression(bool UseGlobal,
SourceLocation Start);
//===--------------------------------------------------------------------===//
// C++ if/switch/while/for condition expression.
// Forward declaration; defined further below in this class.
struct ForRangeInfo;
Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt,
SourceLocation Loc,
Sema::ConditionKind CK,
ForRangeInfo *FRI = nullptr);
//===--------------------------------------------------------------------===//
// C++ Coroutines
ExprResult ParseCoyieldExpression();
//===--------------------------------------------------------------------===//
// C99 6.7.8: Initialization.

/// ParseInitializer
///       initializer: [C99 6.7.8]
///         assignment-expression
///         '{' ...
ExprResult ParseInitializer() {
  // Anything other than a brace must be an assignment-expression; a
  // brace begins a braced initializer list.
  return Tok.isNot(tok::l_brace) ? ParseAssignmentExpression()
                                 : ParseBraceInitializer();
}
bool MayBeDesignationStart();
ExprResult ParseBraceInitializer();
ExprResult ParseInitializerWithPotentialDesignator();
//===--------------------------------------------------------------------===//
// clang Expressions
ExprResult ParseBlockLiteralExpression(); // ^{...}
//===--------------------------------------------------------------------===//
// Objective-C Expressions
ExprResult ParseObjCAtExpression(SourceLocation AtLocation);
ExprResult ParseObjCStringLiteral(SourceLocation AtLoc);
ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc);
ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue);
ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc);
ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc);
ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc);
ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc);
bool isSimpleObjCMessageExpression();
ExprResult ParseObjCMessageExpression();
ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc,
SourceLocation SuperLoc,
ParsedType ReceiverType,
Expr *ReceiverExpr);
ExprResult ParseAssignmentExprWithObjCMessageExprStart(
SourceLocation LBracloc, SourceLocation SuperLoc,
ParsedType ReceiverType, Expr *ReceiverExpr);
bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr);
//===--------------------------------------------------------------------===//
// C99 6.8: Statements and Blocks.
/// A SmallVector of statements, with stack size 32 (as that is the only one
/// used.)
typedef SmallVector<Stmt*, 32> StmtVector;
/// A SmallVector of expressions, with stack size 12 (the maximum used.)
typedef SmallVector<Expr*, 12> ExprVector;
/// A SmallVector of types.
typedef SmallVector<ParsedType, 12> TypeVector;
StmtResult
ParseStatement(SourceLocation *TrailingElseLoc = nullptr,
ParsedStmtContext StmtCtx = ParsedStmtContext::SubStmt);
StmtResult ParseStatementOrDeclaration(
StmtVector &Stmts, ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc = nullptr);
StmtResult ParseStatementOrDeclarationAfterAttributes(
StmtVector &Stmts,
ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
StmtResult ParseExprStatement(ParsedStmtContext StmtCtx);
StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs,
ParsedStmtContext StmtCtx);
StmtResult ParseCaseStatement(ParsedStmtContext StmtCtx,
bool MissingCase = false,
ExprResult Expr = ExprResult());
StmtResult ParseDefaultStatement(ParsedStmtContext StmtCtx);
StmtResult ParseCompoundStatement(bool isStmtExpr = false);
StmtResult ParseCompoundStatement(bool isStmtExpr,
unsigned ScopeFlags);
void ParseCompoundStatementLeadingPragmas();
bool ConsumeNullStmt(StmtVector &Stmts);
StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
bool ParseParenExprOrCondition(StmtResult *InitStmt,
Sema::ConditionResult &CondResult,
SourceLocation Loc,
Sema::ConditionKind CK);
StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseDoStatement();
StmtResult ParseForStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseGotoStatement();
StmtResult ParseContinueStatement();
StmtResult ParseBreakStatement();
StmtResult ParseReturnStatement();
StmtResult ParseAsmStatement(bool &msAsm);
StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
StmtResult ParsePragmaLoopHint(StmtVector &Stmts,
ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
/// Describes the behavior that should be taken for an __if_exists
/// block.
enum IfExistsBehavior {
/// Parse the block; this code is always used.
IEB_Parse,
/// Skip the block entirely; this code is never used.
IEB_Skip,
/// Parse the block as a dependent block, which may be used in
/// some template instantiations but not others.
IEB_Dependent
};
/// Describes the condition of a Microsoft __if_exists or
/// __if_not_exists block.
struct IfExistsCondition {
/// The location of the initial keyword.
SourceLocation KeywordLoc;
/// Whether this is an __if_exists block (rather than an
/// __if_not_exists block).
bool IsIfExists;
/// Nested-name-specifier preceding the name.
CXXScopeSpec SS;
/// The name we're looking for.
UnqualifiedId Name;
/// The behavior this __if_exists or __if_not_exists block
/// should follow.
IfExistsBehavior Behavior;
};
// Microsoft __if_exists / __if_not_exists parsing, per context.
bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result);
void ParseMicrosoftIfExistsStatement(StmtVector &Stmts);
void ParseMicrosoftIfExistsExternalDeclaration();
void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
ParsedAttributes &AccessAttrs,
AccessSpecifier &CurAS);
bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
bool &InitExprsOk);
bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
SmallVectorImpl<Expr *> &Constraints,
SmallVectorImpl<Expr *> &Exprs);
//===--------------------------------------------------------------------===//
// C++ 6: Statements and Blocks
StmtResult ParseCXXTryBlock();
StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false);
StmtResult ParseCXXCatchBlock(bool FnCatch = false);
//===--------------------------------------------------------------------===//
// MS: SEH Statements and Blocks
StmtResult ParseSEHTryBlock();
StmtResult ParseSEHExceptBlock(SourceLocation Loc);
StmtResult ParseSEHFinallyBlock(SourceLocation Loc);
StmtResult ParseSEHLeaveStatement();
//===--------------------------------------------------------------------===//
// Objective-C Statements
StmtResult ParseObjCAtStatement(SourceLocation atLoc,
ParsedStmtContext StmtCtx);
StmtResult ParseObjCTryStmt(SourceLocation atLoc);
StmtResult ParseObjCThrowStmt(SourceLocation atLoc);
StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc);
StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc);
//===--------------------------------------------------------------------===//
// C99 6.7: Declarations.
/// A context for parsing declaration specifiers. TODO: flesh this
/// out, there are other significant restrictions on specifiers than
/// would be best implemented in the parser.
enum class DeclSpecContext {
DSC_normal, // normal context
DSC_class, // class context, enables 'friend'
DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list
DSC_trailing, // C++11 trailing-type-specifier in a trailing return type
DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration
DSC_top_level, // top-level/namespace declaration context
DSC_template_param, // template parameter context
DSC_template_type_arg, // template type argument context
DSC_objc_method_result, // ObjC method result context, enables 'instancetype'
DSC_condition // condition declaration context
};
/// Is this a context in which we are parsing just a type-specifier (or
/// trailing-type-specifier)?
static bool isTypeSpecifier(DeclSpecContext DSC) {
  // Grouped by answer: type-specifier-only contexts first. The switch
  // deliberately has no default so a new enumerator triggers -Wswitch.
  switch (DSC) {
  case DeclSpecContext::DSC_type_specifier:
  case DeclSpecContext::DSC_trailing:
  case DeclSpecContext::DSC_alias_declaration:
  case DeclSpecContext::DSC_template_type_arg:
    return true;

  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
  case DeclSpecContext::DSC_template_param:
  case DeclSpecContext::DSC_objc_method_result:
  case DeclSpecContext::DSC_condition:
    return false;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}
/// Is this a context in which we can perform class template argument
/// deduction?
static bool isClassTemplateDeductionContext(DeclSpecContext DSC) {
  // Grouped by answer; no default case so -Wswitch flags any new
  // DeclSpecContext enumerator.
  switch (DSC) {
  case DeclSpecContext::DSC_objc_method_result:
  case DeclSpecContext::DSC_template_type_arg:
  case DeclSpecContext::DSC_trailing:
  case DeclSpecContext::DSC_alias_declaration:
    return false;

  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
  case DeclSpecContext::DSC_template_param:
  case DeclSpecContext::DSC_condition:
  case DeclSpecContext::DSC_type_specifier:
    return true;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}
/// Information on a C++0x for-range-initializer found while parsing a
/// declaration which turns out to be a for-range-declaration.
struct ForRangeInit {
SourceLocation ColonLoc;
ExprResult RangeExpr;
// True once a ':' has been recorded, i.e. this really was parsed as a
// for-range declaration rather than an ordinary declaration.
bool ParsedForRangeDecl() { return !ColonLoc.isInvalid(); }
};
/// ForRangeInit extended with the statement for the parsed loop variable.
struct ForRangeInfo : ForRangeInit {
StmtResult LoopVar;
};
// Entry points for parsing declarations and declaration groups.
DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs,
SourceLocation *DeclSpecStart = nullptr);
DeclGroupPtrTy
ParseSimpleDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs, bool RequireSemi,
ForRangeInit *FRI = nullptr,
SourceLocation *DeclSpecStart = nullptr);
bool MightBeDeclarator(DeclaratorContext Context);
DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context,
SourceLocation *DeclEnd = nullptr,
ForRangeInit *FRI = nullptr);
Decl *ParseDeclarationAfterDeclarator(Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
bool ParseAsmAttributesAfterDeclarator(Declarator &D);
Decl *ParseDeclarationAfterDeclaratorAndAttributes(
Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ForRangeInit *FRI = nullptr);
// Function-body parsing.
Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);
/// When in code-completion, skip parsing of the function/method body
/// unless the body contains the code-completion point.
///
/// \returns true if the function body was skipped.
bool trySkippingFunctionBody();
bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC,
ParsedAttributesWithRange &Attrs);
DeclSpecContext
getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context);
void ParseDeclarationSpecifiers(
DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal,
LateParsedAttrList *LateAttrs = nullptr);
bool DiagnoseMissingSemiAfterTagDefinition(
DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext,
LateParsedAttrList *LateAttrs = nullptr);
void ParseSpecifierQualifierList(
DeclSpec &DS, AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal);
void ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
DeclaratorContext Context);
// Enum and struct/union/class body parsing.
void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC);
void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
void ParseStructUnionBody(SourceLocation StartLoc, DeclSpec::TST TagType,
Decl *TagDecl);
void ParseStructDeclaration(
ParsingDeclSpec &DS,
llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback);
bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
bool isTypeSpecifierQualifier();
/// isKnownToBeTypeSpecifier - Return true if we know that the specified token
/// is definitely a type-specifier. Return false if it isn't part of a type
/// specifier or if we're not sure.
bool isKnownToBeTypeSpecifier(const Token &Tok) const;
/// Determine whether we are definitely looking at a decl-specifier and not,
/// e.g., the start of an expression such as a function-style cast.
/// Conservative: returns false when it is not a decl-specifier or when
/// we cannot tell.
bool isKnownToBeDeclarationSpecifier() {
  if (!getLangOpts().CPlusPlus)
    return isDeclarationSpecifier(true);
  return isCXXDeclarationSpecifier() == TPResult::True;
}
/// isDeclarationStatement - Decide, while parsing a function body, whether
/// the statement at the current position is a declaration or an expression
/// statement.
/// \returns true for a declaration, false for an expression.
bool isDeclarationStatement() {
  if (!getLangOpts().CPlusPlus)
    return isDeclarationSpecifier(true);
  return isCXXDeclarationStatement();
}
/// isForInitDeclaration - Decide whether the C 'clause-1' or C++
/// 'for-init-statement' part of a 'for' statement is a declaration or an
/// expression.
/// \returns true for a declaration, false for an expression.
bool isForInitDeclaration() {
  // Let OpenMP-aware Sema know a loop header is starting before we begin
  // disambiguating.
  if (getLangOpts().OpenMP)
    Actions.startOpenMPLoop();
  return getLangOpts().CPlusPlus
             ? isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true)
             : isDeclarationSpecifier(true);
}
/// Determine whether this is a C++1z for-range-identifier.
bool isForRangeIdentifier();
/// Determine whether we are currently at the start of an Objective-C
/// class message that appears to be missing the open bracket '['.
bool isStartOfObjCClassMessageMissingOpenBracket();
/// Starting with a scope specifier, identifier, or
/// template-id that refers to the current class, determine whether
/// this is a constructor declarator.
bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false);
/// Specifies the context in which type-id/expression
/// disambiguation will occur.
enum TentativeCXXTypeIdContext {
TypeIdInParens,
TypeIdUnambiguous,
TypeIdAsTemplateArgument
};
/// isTypeIdInParens - Assumes a '(' was just parsed; determine whether the
/// parens contain an expression or a type-id.
/// \param isAmbiguous set to false in C mode; in C++ it reflects the
/// disambiguation result from isCXXTypeId.
/// \returns true for a type-id and false for an expression.
bool isTypeIdInParens(bool &isAmbiguous) {
  if (!getLangOpts().CPlusPlus) {
    isAmbiguous = false;
    return isTypeSpecifierQualifier();
  }
  return isCXXTypeId(TypeIdInParens, isAmbiguous);
}
/// Convenience overload of isTypeIdInParens that discards the ambiguity flag.
bool isTypeIdInParens() {
  bool Ambiguous;
  return isTypeIdInParens(Ambiguous);
}
/// Checks whether the current tokens form a type-id or an expression.
/// Similar to isTypeIdInParens, but does not assume the type-id is
/// parenthesized.
bool isTypeIdUnambiguously() {
  if (!getLangOpts().CPlusPlus)
    return isTypeSpecifierQualifier();
  bool DiscardedAmbiguity;
  return isCXXTypeId(TypeIdUnambiguous, DiscardedAmbiguity);
}
/// isCXXDeclarationStatement - C++-specialized function that disambiguates
/// between a declaration or an expression statement, when parsing function
/// bodies. Returns true for declaration, false for expression.
bool isCXXDeclarationStatement();
/// isCXXSimpleDeclaration - C++-specialized function that disambiguates
/// between a simple-declaration or an expression-statement.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
/// Returns false if the statement is disambiguated as expression.
bool isCXXSimpleDeclaration(bool AllowForRangeDecl);
/// isCXXFunctionDeclarator - Disambiguates between a function declarator or
/// a constructor-style initializer, when parsing declaration statements.
/// Returns true for function declarator and false for constructor-style
/// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration
/// might be a constructor-style initializer.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);
struct ConditionDeclarationOrInitStatementState;
enum class ConditionOrInitStatement {
Expression, ///< Disambiguated as an expression (either kind).
ConditionDecl, ///< Disambiguated as the declaration form of condition.
InitStmtDecl, ///< Disambiguated as a simple-declaration init-statement.
ForRangeDecl, ///< Disambiguated as a for-range declaration.
Error ///< Can't be any of the above!
};
/// Disambiguates between the different kinds of things that can happen
/// after 'if (' or 'switch ('. This could be one of two different kinds of
/// declaration (depending on whether there is a ';' later) or an expression.
ConditionOrInitStatement
isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt,
bool CanBeForRangeDecl);
bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous);
/// Convenience overload of isCXXTypeId that discards the ambiguity flag.
bool isCXXTypeId(TentativeCXXTypeIdContext Context) {
  bool Ambiguous;
  return isCXXTypeId(Context, Ambiguous);
}
/// TPResult - Used as the result value for functions whose purpose is to
/// disambiguate C++ constructs by "tentatively parsing" them.
enum class TPResult {
True, False, Ambiguous, Error
};
/// Based only on the given token kind, determine whether we know that
/// we're at the start of an expression or a type-specifier-seq (which may
/// be an expression, in C++).
///
/// This routine does not attempt to resolve any of the trick cases, e.g.,
/// those involving lookup of identifiers.
///
/// \returns \c TPResult::True if this token starts an expression,
/// \c TPResult::False if this token starts a type-specifier-seq, or
/// \c TPResult::Ambiguous if it cannot tell.
TPResult isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind);
/// isCXXDeclarationSpecifier - Returns TPResult::True if it is a
/// declaration specifier, TPResult::False if it is not,
/// TPResult::Ambiguous if it could be either a decl-specifier or a
/// function-style cast, and TPResult::Error if a parsing error was
/// encountered. If it could be a braced C++11 function-style cast, returns
/// BracedCastResult.
/// Doesn't consume tokens.
TPResult
isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False,
bool *InvalidAsDeclSpec = nullptr);
/// Given that isCXXDeclarationSpecifier returns \c TPResult::True or
/// \c TPResult::Ambiguous, determine whether the decl-specifier would be
/// a type-specifier other than a cv-qualifier.
bool isCXXDeclarationSpecifierAType();
/// Determine whether the current token sequence might be
/// '<' template-argument-list '>'
/// rather than a less-than expression.
TPResult isTemplateArgumentList(unsigned TokensToSkip);
/// Determine whether an identifier has been tentatively declared as a
/// non-type. Such tentative declarations should not be found to name a type
/// during a tentative parse, but also should not be annotated as a non-type.
bool isTentativelyDeclared(IdentifierInfo *II);
// "Tentative parsing" functions, used for disambiguation. If a parsing error
// is encountered they will return TPResult::Error.
// Returning TPResult::True/False indicates that the ambiguity was
// resolved and tentative parsing may stop. TPResult::Ambiguous indicates
// that more tentative parsing is necessary for disambiguation.
// They all consume tokens, so backtracking should be used after calling them.
TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl);
TPResult TryParseTypeofSpecifier();
TPResult TryParseProtocolQualifiers();
TPResult TryParsePtrOperatorSeq();
TPResult TryParseOperatorId();
TPResult TryParseInitDeclaratorList();
TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier = true,
bool mayHaveDirectInit = false);
TPResult
TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr,
bool VersusTemplateArg = false);
TPResult TryParseFunctionDeclarator();
TPResult TryParseBracketDeclarator();
TPResult TryConsumeDeclarationSpecifier();
public:
// Public entry point: parse a type-id/type-name in the given declarator
// context, optionally returning its source range, owned tag declaration,
// and attributes.
TypeResult ParseTypeName(SourceRange *Range = nullptr,
DeclaratorContext Context
= DeclaratorContext::TypeNameContext,
AccessSpecifier AS = AS_none,
Decl **OwnedType = nullptr,
ParsedAttributes *Attrs = nullptr);
private:
void ParseBlockId(SourceLocation CaretLoc);
/// Are [[]] attributes enabled?
bool standardAttributesAllowed() const {
const LangOptions &LO = getLangOpts();
return LO.DoubleSquareBracketAttributes;
}
// Check for the start of an attribute-specifier-seq in a context where an
// attribute is not allowed. The current token must be '['.
bool CheckProhibitedCXX11Attribute() {
  assert(Tok.is(tok::l_square));
  // '[[' with standard attributes enabled marks a prohibited attribute.
  if (standardAttributesAllowed() && NextToken().is(tok::l_square))
    return DiagnoseProhibitedCXX11Attribute();
  return false;
}
bool DiagnoseProhibitedCXX11Attribute();
/// If the current token begins a misplaced [[...]] attribute or 'alignas',
/// diagnose it, suggesting \p CorrectLocation as the right position.
void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                  SourceLocation CorrectLocation) {
  if (!standardAttributesAllowed())
    return;
  bool AtDoubleSquare = Tok.is(tok::l_square) && NextToken().is(tok::l_square);
  if (!AtDoubleSquare && Tok.isNot(tok::kw_alignas))
    return;
  DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
}
void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
SourceLocation CorrectLocation);
// Move type attributes that were parsed on a tag definition off of the
// DeclSpec, depending on how the tag is being used.
void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
DeclSpec &DS, Sema::TagUseKind TUK);
void ProhibitAttributes(ParsedAttributesWithRange &Attrs,
SourceLocation FixItLoc = SourceLocation()) {
if (Attrs.Range.isInvalid())
return;
DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
Attrs.clear();
}
/// Overload for attribute views: diagnoses and clears only the list (the
/// underlying attributes are owned elsewhere).
void ProhibitAttributes(ParsedAttributesViewWithRange &Attrs,
                        SourceLocation FixItLoc = SourceLocation()) {
  if (!Attrs.Range.isInvalid()) {
    DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
    Attrs.clearListOnly();
  }
}
void DiagnoseProhibitedAttributes(const SourceRange &Range,
SourceLocation FixItLoc);
// Forbid C++11 and C2x attributes that appear on certain syntactic locations
// which the standard permits but we don't support yet, for example, attributes
// that appertain to decl specifiers.
void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
unsigned DiagID);
/// Skip C++11 and C2x attributes and return the end location of the
/// last one.
/// \returns SourceLocation() if there are no attributes.
SourceLocation SkipCXX11Attributes();
/// Diagnose and skip C++11 and C2x attributes that appear in syntactic
/// locations where attributes are not allowed.
void DiagnoseAndSkipCXX11Attributes();
/// Parses syntax-generic attribute arguments for attributes which are
/// known to the implementation, and adds them to the given ParsedAttributes
/// list with the given attribute syntax. Returns the number of arguments
/// parsed for the attribute.
unsigned
ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
/// If a GNU '__attribute__' list follows, parse it and attach the resulting
/// attributes to declarator \p D.
void MaybeParseGNUAttributes(Declarator &D,
                             LateParsedAttrList *LateAttrs = nullptr) {
  if (Tok.isNot(tok::kw___attribute))
    return;
  ParsedAttributes attrs(AttrFactory);
  SourceLocation endLoc;
  ParseGNUAttributes(attrs, &endLoc, LateAttrs, &D);
  D.takeAttributes(attrs, endLoc);
}
/// If a GNU '__attribute__' list follows, parse it directly into \p attrs.
void MaybeParseGNUAttributes(ParsedAttributes &attrs,
                             SourceLocation *endLoc = nullptr,
                             LateParsedAttrList *LateAttrs = nullptr) {
  if (Tok.isNot(tok::kw___attribute))
    return;
  ParseGNUAttributes(attrs, endLoc, LateAttrs);
}
void ParseGNUAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr,
Declarator *D = nullptr);
// Parse the argument list of a single GNU-syntax attribute.
void ParseGNUAttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax, Declarator *D);
IdentifierLoc *ParseIdentifierLoc();
// Parse the argument list of an attribute in the 'clang' vendor namespace.
unsigned
ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
/// If a C++11 attribute-specifier-seq starts here, parse it and attach the
/// attributes to declarator \p D.
void MaybeParseCXX11Attributes(Declarator &D) {
  if (!standardAttributesAllowed() || !isCXX11AttributeSpecifier())
    return;
  ParsedAttributesWithRange attrs(AttrFactory);
  SourceLocation endLoc;
  ParseCXX11Attributes(attrs, &endLoc);
  D.takeAttributes(attrs, endLoc);
}
/// If a C++11 attribute-specifier-seq starts here, parse it into a ranged
/// list and then transfer the attributes into the caller's plain list.
void MaybeParseCXX11Attributes(ParsedAttributes &attrs,
                               SourceLocation *endLoc = nullptr) {
  if (!standardAttributesAllowed() || !isCXX11AttributeSpecifier())
    return;
  ParsedAttributesWithRange RangedAttrs(AttrFactory);
  ParseCXX11Attributes(RangedAttrs, endLoc);
  attrs.takeAllFrom(RangedAttrs);
}
/// Ranged-list overload; \p OuterMightBeMessageSend is forwarded to the
/// attribute-specifier disambiguation.
void MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs,
                               SourceLocation *endLoc = nullptr,
                               bool OuterMightBeMessageSend = false) {
  if (!standardAttributesAllowed())
    return;
  if (isCXX11AttributeSpecifier(false, OuterMightBeMessageSend))
    ParseCXX11Attributes(attrs, endLoc);
}
void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
SourceLocation *EndLoc = nullptr);
void ParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *EndLoc = nullptr);
/// Parses a C++11 (or C2x)-style attribute argument list. Returns true
/// if this results in adding an attribute to the ParsedAttributes list.
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc);
IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc);
/// With Microsoft extensions enabled, parse a '['-introduced Microsoft
/// attribute block if one starts at the current token.
void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs,
                                   SourceLocation *endLoc = nullptr) {
  if (!getLangOpts().MicrosoftExt || Tok.isNot(tok::l_square))
    return;
  ParseMicrosoftAttributes(attrs, endLoc);
}
// Parse the argument list of a Microsoft '[uuid(...)]' attribute.
void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs);
void ParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr);
/// With the '__declspec' keyword enabled, parse a Microsoft __declspec
/// sequence if one starts at the current token.
void MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
                                  SourceLocation *End = nullptr) {
  if (!getLangOpts().DeclSpecKeyword || Tok.isNot(tok::kw___declspec))
    return;
  ParseMicrosoftDeclSpecs(Attrs, End);
}
void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
SourceLocation *End = nullptr);
bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs);
// Vendor-specific type attribute parsers.
void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs);
void DiagnoseAndSkipExtendedMicrosoftTypeAttributes();
SourceLocation SkipExtendedMicrosoftTypeAttributes();
void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs);
void ParseBorlandTypeAttributes(ParsedAttributes &attrs);
void ParseOpenCLKernelAttributes(ParsedAttributes &attrs);
void ParseOpenCLQualifiers(ParsedAttributes &Attrs);
/// Parses the opencl_unroll_hint attribute when compiling OpenCL.
/// \return false if an error happens while parsing; true otherwise
/// (including when not in OpenCL mode, where nothing is parsed).
bool MaybeParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs) {
  if (!getLangOpts().OpenCL)
    return true;
  return ParseOpenCLUnrollHintAttribute(Attrs);
}
/// Parses opencl_unroll_hint attribute.
/// \return false if error happens.
bool ParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs);
void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs);
VersionTuple ParseVersionTuple(SourceRange &Range);
// Parsers for attributes with structured (non-expression) arguments; each
// takes the attribute/scope names and locations plus the spelling syntax.
void ParseAvailabilityAttribute(IdentifierInfo &Availability,
SourceLocation AvailabilityLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
Optional<AvailabilitySpec> ParseAvailabilitySpec();
ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc);
void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol,
SourceLocation Loc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
SourceLocation ObjCBridgeRelatedLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseSwiftNewtypeAttribute(IdentifierInfo &SwiftNewtype,
SourceLocation SwiftNewtypeLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void
ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc, ParsedAttributes &Attrs,
SourceLocation *EndLoc, IdentifierInfo *ScopeName,
SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax);
// Specifier parsers: typeof, decltype, __underlying_type, _Atomic, etc.
void ParseTypeofSpecifier(DeclSpec &DS);
SourceLocation ParseDecltypeSpecifier(DeclSpec &DS);
void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ParseUnderlyingTypeSpecifier(DeclSpec &DS);
void ParseAtomicSpecifier(DeclSpec &DS);
ExprResult ParseAlignArgument(SourceLocation Start,
SourceLocation &EllipsisLoc);
void ParseAlignmentSpecifier(ParsedAttributes &Attrs,
SourceLocation *endLoc = nullptr);
void ParsePtrauthQualifier(ParsedAttributes &Attrs);
// Query whether a token (or the current token) is a C++11 virt-specifier
// ('override'/'final' family).
VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const;
VirtSpecifiers::Specifier isCXX11VirtSpecifier() const {
return isCXX11VirtSpecifier(Tok);
}
void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface,
SourceLocation FriendLoc);
bool isCXX11FinalKeyword() const;
/// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to
/// enter a new C++ declarator scope and exit it when the function is
/// finished.
class DeclaratorScopeObj {
Parser &P;
CXXScopeSpec &SS;
bool EnteredScope;   // Sema declarator scope was entered and must be exited.
bool CreatedScope;   // Parser scope was pushed and must be popped.
public:
DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss)
: P(p), SS(ss), EnteredScope(false), CreatedScope(false) {}
// Enter the scope named by SS. May be called at most once per object.
void EnterDeclaratorScope() {
assert(!EnteredScope && "Already entered the scope!");
assert(SS.isSet() && "C++ scope was not set!");
CreatedScope = true;
P.EnterScope(0); // Not a decl scope.
// Only a false return from ActOnCXXEnterDeclaratorScope is recorded, so
// the destructor performs the matching ActOnCXXExitDeclaratorScope call.
if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS))
EnteredScope = true;
}
~DeclaratorScopeObj() {
// Unwind in reverse order: Sema declarator scope first, then parser scope.
if (EnteredScope) {
assert(SS.isSet() && "C++ scope was cleared ?");
P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS);
}
if (CreatedScope)
P.ExitScope();
}
};
/// ParseDeclarator - Parse and verify a newly-initialized declarator.
void ParseDeclarator(Declarator &D);
/// A function that parses a variant of direct-declarator.
typedef void (Parser::*DirectDeclParseFunction)(Declarator&);
void ParseDeclaratorInternal(Declarator &D,
DirectDeclParseFunction DirectDeclParser);
// Bitmask describing which attribute syntaxes are accepted while parsing a
// type-qualifier list; combined values are passed to
// ParseTypeQualifierListOpt below.
enum AttrRequirements {
AR_NoAttributesParsed = 0, ///< No attributes are diagnosed.
AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes.
AR_GNUAttributesParsed = 1 << 1,
AR_CXX11AttributesParsed = 1 << 2,
AR_DeclspecAttributesParsed = 1 << 3,
AR_AllAttributesParsed = AR_GNUAttributesParsed |
AR_CXX11AttributesParsed |
AR_DeclspecAttributesParsed,
AR_VendorAttributesParsed = AR_GNUAttributesParsed |
AR_DeclspecAttributesParsed
};
void ParseTypeQualifierListOpt(
DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed,
bool AtomicAllowed = true, bool IdentifierRequired = false,
Optional<llvm::function_ref<void()>> CodeCompletionHandler = None);
void ParseDirectDeclarator(Declarator &D);
void ParseDecompositionDeclarator(Declarator &D);
void ParseParenDeclarator(Declarator &D);
void ParseFunctionDeclarator(Declarator &D,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker,
bool IsAmbiguous,
bool RequiresArg = false);
bool ParseRefQualifier(bool &RefQualifierIsLValueRef,
SourceLocation &RefQualifierLoc);
bool isFunctionDeclaratorIdentifierList();
void ParseFunctionDeclaratorIdentifierList(
Declarator &D,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo);
void ParseParameterDeclarationClause(
Declarator &D,
ParsedAttributes &attrs,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo,
SourceLocation &EllipsisLoc);
void ParseBracketDeclarator(Declarator &D);
void ParseMisplacedBracketDeclarator(Declarator &D);
//===--------------------------------------------------------------------===//
// C++ 7: Declarations [dcl.dcl]
/// The kind of attribute specifier we have found.
enum CXX11AttributeKind {
/// This is not an attribute specifier.
CAK_NotAttributeSpecifier,
/// This should be treated as an attribute-specifier.
CAK_AttributeSpecifier,
/// The next tokens are '[[', but this is not an attribute-specifier. This
/// is ill-formed by C++11 [dcl.attr.grammar]p6.
CAK_InvalidAttributeSpecifier
};
CXX11AttributeKind
isCXX11AttributeSpecifier(bool Disambiguate = false,
bool OuterMightBeMessageSend = false);
void DiagnoseUnexpectedNamespace(NamedDecl *Context);
DeclGroupPtrTy ParseNamespace(DeclaratorContext Context,
SourceLocation &DeclEnd,
SourceLocation InlineLoc = SourceLocation());
/// Source information for one level of a nested namespace declaration
/// (e.g. each component of 'namespace A::inline B::C').
struct InnerNamespaceInfo {
SourceLocation NamespaceLoc;
SourceLocation InlineLoc;
SourceLocation IdentLoc;
IdentifierInfo *Ident;
};
using InnerNamespaceInfoList = llvm::SmallVector<InnerNamespaceInfo, 4>;
void ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs,
unsigned int index, SourceLocation &InlineLoc,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker);
Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context);
Decl *ParseExportDeclaration();
DeclGroupPtrTy ParseUsingDirectiveOrDeclaration(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs);
Decl *ParseUsingDirective(DeclaratorContext Context,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
ParsedAttributes &attrs);
/// The parsed pieces of one declarator in a using-declaration; reusable
/// across the declarators of a comma-separated using-declaration via clear().
struct UsingDeclarator {
SourceLocation TypenameLoc;
CXXScopeSpec SS;
UnqualifiedId Name;
SourceLocation EllipsisLoc;
// Reset all fields so the object can be reused for the next declarator.
void clear() {
TypenameLoc = EllipsisLoc = SourceLocation();
SS.clear();
Name.clear();
}
};
bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D);
DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context,
const ParsedTemplateInfo &TemplateInfo,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
AccessSpecifier AS = AS_none);
Decl *ParseAliasDeclarationAfterDeclarator(
const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc,
UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS,
ParsedAttributes &Attrs, Decl **OwnedType = nullptr);
Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd);
Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc,
SourceLocation AliasLoc, IdentifierInfo *Alias,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// C++ 9: classes [class] and C structs/unions.
bool isValidAfterTypeSpecifier(bool CouldBeBitfield);
void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc,
DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, bool EnteringContext,
DeclSpecContext DSC,
ParsedAttributesWithRange &Attributes);
void SkipCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
unsigned TagType,
Decl *TagDecl);
void ParseCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
ParsedAttributesWithRange &Attrs,
unsigned TagType,
Decl *TagDecl);
ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction,
SourceLocation &EqualLoc);
bool ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo,
VirtSpecifiers &VS,
ExprResult &BitfieldSize,
LateParsedAttrList &LateAttrs);
void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D,
VirtSpecifiers &VS);
DeclGroupPtrTy ParseCXXClassMemberDeclaration(
AccessSpecifier AS, ParsedAttributes &Attr,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ParsingDeclRAIIObject *DiagsFromTParams = nullptr);
DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas(
AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs,
DeclSpec::TST TagType, Decl *Tag);
// Constructor mem-initializer parsing.
void ParseConstructorInitializer(Decl *ConstructorDecl);
MemInitResult ParseMemInitializer(Decl *ConstructorDecl);
void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo,
Decl *ThisDecl);
//===--------------------------------------------------------------------===//
// C++ 10: Derived classes [class.derived]
TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
SourceLocation &EndLocation);
void ParseBaseClause(Decl *ClassDecl);
BaseResult ParseBaseSpecifier(Decl *ClassDecl);
AccessSpecifier getAccessSpecifierIfPresent() const;
bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Id,
bool AssumeTemplateId);
bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Result);
//===--------------------------------------------------------------------===//
// OpenMP: Directives and clauses.
/// Parse clauses for '#pragma omp declare simd'.
DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr,
CachedTokens &Toks,
SourceLocation Loc);
/// Parses OpenMP context selectors and calls \p Callback for each
/// successfully parsed context selector.
bool
parseOpenMPContextSelectors(SourceLocation Loc,
SmallVectorImpl<Sema::OMPCtxSelectorData> &Data);
/// Parse clauses for '#pragma omp declare variant'.
void ParseOMPDeclareVariantClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks,
SourceLocation Loc);
/// Parse clauses for '#pragma omp declare target'.
DeclGroupPtrTy ParseOMPDeclareTargetClauses();
/// Parse '#pragma omp end declare target'.
void ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind DKind,
SourceLocation Loc);
/// Parses declarative OpenMP directives.
DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl(
AccessSpecifier &AS, ParsedAttributesWithRange &Attrs,
DeclSpec::TST TagType = DeclSpec::TST_unspecified,
Decl *TagDecl = nullptr);
/// Parse 'omp declare reduction' construct.
DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS);
/// Parses initializer for provided omp_priv declaration inside the reduction
/// initializer.
void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm);
/// Parses 'omp declare mapper' directive.
DeclGroupPtrTy ParseOpenMPDeclareMapperDirective(AccessSpecifier AS);
/// Parses variable declaration in 'omp declare mapper' directive.
TypeResult parseOpenMPDeclareMapperVarDecl(SourceRange &Range,
DeclarationName &Name,
AccessSpecifier AS = AS_none);
/// Parses simple list of variables.
///
/// \param Kind Kind of the directive.
/// \param Callback Callback function to be called for the list elements.
/// \param AllowScopeSpecifier true, if the variables can have fully
/// qualified names.
///
bool ParseOpenMPSimpleVarList(
OpenMPDirectiveKind Kind,
const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> &
Callback,
bool AllowScopeSpecifier);
/// Parses declarative or executable directive.
///
/// \param StmtCtx The context in which we're parsing the directive.
StmtResult
ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx);
/// Parses clause of kind \a CKind for directive of a kind \a Kind.
///
/// \param DKind Kind of current directive.
/// \param CKind Kind of current clause.
/// \param FirstClause true, if this is the first clause of a kind \a CKind
/// in current directive.
///
OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind CKind, bool FirstClause);
/// Parses clause with a single expression of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind,
bool ParseOnly);
/// Parses simple clause of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly);
/// Parses clause with a single expression and an additional argument
/// of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind,
bool ParseOnly);
/// Parses clause without any additional arguments.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false);
/// Parses clause with the list of variables of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind Kind, bool ParseOnly);
public:
/// Parses simple expression in parens for single-expression clauses of OpenMP
/// constructs.
/// \param RLoc Returned location of right paren.
ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc,
bool IsAddressOfOperand = false);
/// Data used for parsing list of variables in OpenMP clauses.
struct OpenMPVarListDataTy {
Expr *TailExpr = nullptr;
SourceLocation ColonLoc;
SourceLocation RLoc;
CXXScopeSpec ReductionOrMapperIdScopeSpec;
DeclarationNameInfo ReductionOrMapperId;
int ExtraModifier = -1; ///< Additional modifier for linear, map, depend or
///< lastprivate clause.
SmallVector<OpenMPMapModifierKind, OMPMapClause::NumberOfModifiers>
MapTypeModifiers;
SmallVector<SourceLocation, OMPMapClause::NumberOfModifiers>
MapTypeModifiersLoc;
bool IsMapTypeImplicit = false;
SourceLocation DepLinMapLastLoc;
};
/// Parses clauses with list.
bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind,
SmallVectorImpl<Expr *> &Vars,
OpenMPVarListDataTy &Data);
bool ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
bool AllowDestructorName,
bool AllowConstructorName,
bool AllowDeductionGuide,
ParsedType ObjectType,
SourceLocation *TemplateKWLoc,
UnqualifiedId &Result);
/// Parses the mapper modifier in map, to, and from clauses.
bool parseMapperModifier(OpenMPVarListDataTy &Data);
/// Parses map-type-modifiers in map clause.
/// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list)
/// where, map-type-modifier ::= always | close | mapper(mapper-identifier)
bool parseMapTypeModifiers(OpenMPVarListDataTy &Data);
private:
//===--------------------------------------------------------------------===//
// C++ 14: Templates [temp]
// C++ 14.1: Template Parameters [temp.param]
Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS = AS_none);
Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS);
Decl *ParseSingleDeclarationAfterTemplate(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none);
bool ParseTemplateParameters(unsigned Depth,
SmallVectorImpl<NamedDecl *> &TemplateParams,
SourceLocation &LAngleLoc,
SourceLocation &RAngleLoc);
bool ParseTemplateParameterList(unsigned Depth,
SmallVectorImpl<NamedDecl*> &TemplateParams);
bool isStartOfTemplateTypeParameter();
NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position);
void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc,
SourceLocation CorrectLoc,
bool AlreadyHasEllipsis,
bool IdentifierHasName);
void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc,
Declarator &D);
// C++ 14.3: Template arguments [temp.arg]
typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList;
bool ParseGreaterThanInTemplateList(SourceLocation &RAngleLoc,
bool ConsumeLastToken,
bool ObjCGenericList);
bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken,
SourceLocation &LAngleLoc,
TemplateArgList &TemplateArgs,
SourceLocation &RAngleLoc);
bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &TemplateName,
bool AllowTypeAnnotation = true);
void AnnotateTemplateIdTokenAsType(bool IsClassName = false);
bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs);
ParsedTemplateArgument ParseTemplateTemplateArgument();
ParsedTemplateArgument ParseTemplateArgument();
Decl *ParseExplicitInstantiation(DeclaratorContext Context,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS = AS_none);
// C++2a: Template, concept definition [temp]
Decl *
ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// Modules
DeclGroupPtrTy ParseModuleDecl(bool IsFirstDecl);
Decl *ParseModuleImport(SourceLocation AtLoc);
bool parseMisplacedModuleImport();
/// If the current token is one of the module annotation tokens
/// (annot_module_begin, annot_module_end or annot_module_include),
/// delegate to parseMisplacedModuleImport() to diagnose/recover and
/// return its result; otherwise leave the token stream untouched and
/// return false (nothing misplaced was handled).
bool tryParseMisplacedModuleImport() {
  tok::TokenKind Kind = Tok.getKind();
  if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end ||
      Kind == tok::annot_module_include)
    return parseMisplacedModuleImport();
  return false;
}
bool ParseModuleName(
SourceLocation UseLoc,
SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path,
bool IsImport);
//===--------------------------------------------------------------------===//
// C++11/G++: Type Traits [Type-Traits.html in the GCC manual]
ExprResult ParseTypeTrait();
/// Parse the given string as a type.
///
/// This is a dangerous utility function currently employed only by API notes.
/// It is not a general entry-point for safely parsing types from strings.
///
/// \param typeStr The string to be parsed as a type.
/// \param context The name of the context in which this string is being
/// parsed, which will be used in diagnostics.
/// \param includeLoc The location at which this parse was triggered.
TypeResult parseTypeFromString(StringRef typeStr, StringRef context,
SourceLocation includeLoc);
//===--------------------------------------------------------------------===//
// Embarcadero: Array and Expression Traits
ExprResult ParseArrayTypeTrait();
ExprResult ParseExpressionTrait();
ExprResult ParseBuiltinPtrauthTypeDiscriminator();
//===--------------------------------------------------------------------===//
// Preprocessor code-completion pass-through
void CodeCompleteDirective(bool InConditional) override;
void CodeCompleteInConditionalExclusion() override;
void CodeCompleteMacroName(bool IsDefinition) override;
void CodeCompletePreprocessorExpression() override;
void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo,
unsigned ArgumentIndex) override;
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled) override;
void CodeCompleteNaturalLanguage() override;
};
} // end namespace clang
#endif
|
exercise3.c | /*
* BSD 2-Clause License
*
* Copyright (c) 2020, Alessandro Capotondi
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file exercise3.c
* @author Alessandro Capotondi
* @date 27 Mar 2020
* @brief Exercise 3
*
* @see https://dolly.fim.unimore.it/2019/course/view.php?id=152
*/
#include <stdio.h>
#include <omp.h>
#include "utils.h"
/**
* @brief EX 3 - Data parallelism: unbalanced parallel loop ** 4 THREADS **
*
* a) Parallelize loop w/static scheduling
* b) Parallelize loop w/dynamic scheduling, for chunks of 32, 16, 8, 4 ,1 (128 iter)
*
* @return void
*/
/**
 * @brief Runs 128 deliberately unbalanced loop iterations in parallel on
 *        4 threads so the effect of the scheduling policy can be observed.
 *
 * The work per iteration grows in steps of four iterations: iterations
 * 0-3 call work(0), 4-7 call work(1<<20), 8-11 call work(2<<20), and so
 * on, so a static schedule leaves later chunks far more loaded.  M (the
 * dynamic chunk size) is expected to be defined at compile time, e.g.
 * -DM=32.
 *
 * @return void
 */
void exercise()
{
    //#pragma omp parallel for num_threads(4) schedule(static)
#pragma omp parallel for num_threads(4) schedule(dynamic, M)
    for (int i = 0; i < 128; i++)
    {
        /* omp_get_thread_num() and i are plain ints, so %d is the correct
         * conversion (the previous %hu did not match the arguments). */
        DEBUG_PRINT("%d: I am executing iteration %d!\n", omp_get_thread_num(), i);
        /* (i >> 2) * (1 << 20) units of work: same value as the old
         * ((i >> 2) * 1) << 20, written with explicit parentheses. */
        work((i >> 2) * (1 << 20));
    }
}
|
pi_omp_profesor.c | //gcc pi_omp_profesor.c -o x -fopenmp -lm && ./x
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <sys/time.h>
#define ITERATIONS 2e09
#define THREADS 8
#define PAD 8
/*
 * Accumulates thread ID's slice of the Leibniz series for pi into
 * piTotal[ID*PAD].  PAD spreads the per-thread accumulators apart in
 * memory so they do not share a cache line (avoids false sharing).
 * Must be called from inside an OpenMP parallel region so that
 * omp_get_num_threads() reports the team size.  Always returns 0.
 */
int calculatePi(double *piTotal, int ID)
{
    /* Use 64-bit indices: with ITERATIONS == 2e9 the old int-typed
     * expression (i*2)+1 overflowed signed int (i*2 reaches ~4e9),
     * which is undefined behavior. */
    long long start, end, i;
    start = (long long)(ITERATIONS / omp_get_num_threads()) * ID;
    end = (long long)(ITERATIONS / omp_get_num_threads()) * (ID + 1);
    i = start;
    do{
        /* Two Leibniz terms per pass: +4/(2i+1) then -4/(2(i+1)+1). */
        *(piTotal + (ID*PAD)) = *(piTotal + (ID*PAD)) + (double)(4.0 / ((i*2)+1));
        i++;
        *(piTotal + (ID*PAD)) = *(piTotal + (ID*PAD)) - (double)(4.0 / ((i*2)+1));
        i++;
    }while(i < end);
    return 0;
}
int main()
{
    int i, threads = THREADS;
    /* Per-thread partial sums; each thread writes only pi[ID*PAD], the
     * PAD-double stride keeps accumulators on separate cache lines. */
    double pi[threads*PAD];
    struct timeval tval_before, tval_after, tval_result;
    gettimeofday(&tval_before, NULL);
    for(i = 0; i < THREADS; i++)
        pi[i*PAD] = 0;
    /* Each thread accumulates its own slice of the series. */
    #pragma omp parallel num_threads(threads)
    {
        int ID = omp_get_thread_num();
        calculatePi(pi, ID);
    }
    /* Serial reduction of the padded partial sums into pi[0]. */
    for(i = 1; i < THREADS; i++)
    {
        *pi = *pi + *(pi + (i*PAD));
    }
    gettimeofday(&tval_after, NULL);
    timersub(&tval_after, &tval_before, &tval_result);
    printf("Time elapsed: %ld.%06ld\n", (long int)tval_result.tv_sec, (long int)tval_result.tv_usec);
    printf("pi: %2.10f \n", pi[0]);
}
|
/*
 * Rescales the metric perturbation components hDD at every grid point
 * (including ghost zones) by a common factor tmp8 so that the rescaled
 * 3-metric satisfies the generator's determinant constraint.
 * NRPy+-generated kernel; presumably spherical-like coordinates given
 * the sin(xx1)/xx0 weight factors -- confirm against the generating
 * NRPy+ notebook.  xx holds per-dimension coordinate arrays and
 * Nxx_plus_2NGHOSTS the grid extents including ghost zones.
 */
void enforce_detgammabar_constraint(const int Nxx_plus_2NGHOSTS[3],REAL *xx[3], REAL *in_gfs) {
#pragma omp parallel for
  for(int i2=0; i2<Nxx_plus_2NGHOSTS[2]; i2++) {
    /* NOTE(review): xx2 is loaded but never used below -- an artifact of
     * the code generator, kept as-is. */
    const REAL xx2 = xx[2][i2];
    for(int i1=0; i1<Nxx_plus_2NGHOSTS[1]; i1++) {
      const REAL xx1 = xx[1][i1];
      for(int i0=0; i0<Nxx_plus_2NGHOSTS[0]; i0++) {
        const REAL xx0 = xx[0][i0];
        /*
         * NRPy+ Finite Difference Code Generation, Step 1 of 2: Read from main memory and compute finite difference stencils:
         */
        const double hDD00 = in_gfs[IDX4(HDD00GF, i0,i1,i2)];
        const double hDD01 = in_gfs[IDX4(HDD01GF, i0,i1,i2)];
        const double hDD02 = in_gfs[IDX4(HDD02GF, i0,i1,i2)];
        const double hDD11 = in_gfs[IDX4(HDD11GF, i0,i1,i2)];
        const double hDD12 = in_gfs[IDX4(HDD12GF, i0,i1,i2)];
        const double hDD22 = in_gfs[IDX4(HDD22GF, i0,i1,i2)];
        /*
         * NRPy+ Finite Difference Code Generation, Step 2 of 2: Evaluate SymPy expressions and write to main memory:
         */
        const double tmp0 = hDD00 + 1;
        const double tmp1 = sin(xx1);
        const double tmp2 = pow(tmp1, 2);
        const double tmp3 = tmp2*pow(xx0, 4);
        const double tmp4 = pow(xx0, 2);
        const double tmp5 = hDD11*tmp4 + tmp4;
        const double tmp6 = tmp2*tmp4;
        const double tmp7 = hDD22*tmp6 + tmp6;
        /* tmp8: cube root of (reference determinant weight / det of the
         * full 3x3 metric built from hDD), the common rescaling factor. */
        const double tmp8 = cbrt(1.0/(-pow(hDD01, 2)*tmp4*tmp7 + 2*hDD01*hDD02*hDD12*tmp3 - pow(hDD02, 2)*tmp5*tmp6 - pow(hDD12, 2)*tmp0*tmp3 + tmp0*tmp5*tmp7))*pow(fabs(tmp1), 2.0/3.0)*pow(fabs(xx0), 4.0/3.0);
        /* Rescale in place; the diagonal entries carry the +1/-1 shifts
         * because hDD stores the deviation from the reference metric. */
        in_gfs[IDX4(HDD00GF, i0, i1, i2)] = tmp0*tmp8 - 1;
        in_gfs[IDX4(HDD01GF, i0, i1, i2)] = hDD01*tmp8;
        in_gfs[IDX4(HDD02GF, i0, i1, i2)] = hDD02*tmp8;
        in_gfs[IDX4(HDD11GF, i0, i1, i2)] = tmp8*(hDD11 + 1) - 1;
        in_gfs[IDX4(HDD12GF, i0, i1, i2)] = hDD12*tmp8;
        in_gfs[IDX4(HDD22GF, i0, i1, i2)] = tmp8*(hDD22 + 1) - 1;
      } // END LOOP: for(int i0=0; i0<Nxx_plus_2NGHOSTS[0]; i0++)
    } // END LOOP: for(int i1=0; i1<Nxx_plus_2NGHOSTS[1]; i1++)
  } // END LOOP: for(int i2=0; i2<Nxx_plus_2NGHOSTS[2]; i2++)
}
|
decorate.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD EEEEE CCCC OOO RRRR AAA TTTTT EEEEE %
% D D E C O O R R A A T E %
% D D EEE C O O RRRR AAAAA T EEE %
% D D E C O O R R A A T E %
% DDDD EEEEE CCCC OOO R R A A T EEEEE %
% %
% %
% MagickCore Image Decoration Methods %
% %
% Software Design %
% John Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/color-private.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/decorate.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/image.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/quantum.h"
#include "magick/resource_.h"
#include "magick/thread-private.h"
#include "magick/transform.h"
/*
Define declarations.
*/
#define AccentuateModulate ScaleCharToQuantum(80)
#define HighlightModulate ScaleCharToQuantum(125)
#define ShadowModulate ScaleCharToQuantum(135)
#define DepthModulate ScaleCharToQuantum(185)
#define TroughModulate ScaleCharToQuantum(110)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B o r d e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BorderImage() surrounds the image with a border of the color defined by
% the bordercolor member of the image structure. The width and height
% of the border are defined by the corresponding members of the border_info
% structure.
%
% The format of the BorderImage method is:
%
% Image *BorderImage(const Image *image,const RectangleInfo *border_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o border_info: Define the width and height of the border.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *BorderImage(const Image *image,
  const RectangleInfo *border_info,ExceptionInfo *exception)
{
  FrameInfo
    frame;

  Image
    *copy,
    *framed;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(border_info != (RectangleInfo *) NULL);
  /*
    A border is simply a frame with no bevels: the border color surrounds
    the image on all four sides.
  */
  frame.width=image->columns+(border_info->width << 1);
  frame.height=image->rows+(border_info->height << 1);
  frame.x=(ssize_t) border_info->width;
  frame.y=(ssize_t) border_info->height;
  frame.inner_bevel=0;
  frame.outer_bevel=0;
  /*
    FrameImage() paints with the matte color, so clone the source and
    temporarily substitute its border color for the matte color.
  */
  copy=CloneImage(image,0,0,MagickTrue,exception);
  if (copy == (Image *) NULL)
    return((Image *) NULL);
  copy->matte_color=image->border_color;
  framed=FrameImage(copy,&frame,exception);
  copy=DestroyImage(copy);
  if (framed != (Image *) NULL)
    framed->matte_color=image->matte_color;
  return(framed);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F r a m e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FrameImage() adds a simulated three-dimensional border around the image.
% The color of the border is defined by the matte_color member of image.
% Members width and height of frame_info specify the border width of the
% vertical and horizontal sides of the frame. Members inner and outer
% indicate the width of the inner and outer shadows of the frame.
%
% The format of the FrameImage method is:
%
% Image *FrameImage(const Image *image,const FrameInfo *frame_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o frame_info: Define the width and height of the frame and its bevels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FrameImage(const Image *image,const FrameInfo *frame_info,
  ExceptionInfo *exception)
{
#define FrameImageTag "Frame/Image"

  CacheView
    *image_view,
    *frame_view;

  Image
    *frame_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    accentuate,
    border,
    highlight,
    interior,
    matte,
    shadow,
    trough;

  register ssize_t
    x;

  size_t
    bevel_width,
    height,
    width;

  ssize_t
    y;

  /*
    Check frame geometry.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(frame_info != (FrameInfo *) NULL);
  if ((frame_info->outer_bevel < 0) || (frame_info->inner_bevel < 0))
    ThrowImageException(OptionError,"FrameIsLessThanImageSize");
  bevel_width=(size_t) (frame_info->outer_bevel+frame_info->inner_bevel);
  /* Interior opening must be at least as large as the source image. */
  width=frame_info->width-frame_info->x-bevel_width;
  height=frame_info->height-frame_info->y-bevel_width;
  if ((width < image->columns) || (height < image->rows))
    ThrowImageException(OptionError,"FrameIsLessThanImageSize");
  /*
    Initialize framed image attributes.
  */
  frame_image=CloneImage(image,frame_info->width,frame_info->height,MagickTrue,
    exception);
  if (frame_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(frame_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&frame_image->exception);
      frame_image=DestroyImage(frame_image);
      return((Image *) NULL);
    }
  if ((IsPixelGray(&frame_image->border_color) == MagickFalse) &&
      (IsGrayColorspace(frame_image->colorspace) != MagickFalse))
    (void) SetImageColorspace(frame_image,sRGBColorspace);
  if ((frame_image->border_color.opacity != OpaqueOpacity) &&
      (frame_image->matte == MagickFalse))
    (void) SetImageAlphaChannel(frame_image,OpaqueAlphaChannel);
  frame_image->page=image->page;
  if ((image->page.width != 0) && (image->page.height != 0))
    {
      frame_image->page.width+=frame_image->columns-image->columns;
      frame_image->page.height+=frame_image->rows-image->rows;
    }
  /*
    Initialize 3D effects colors: accentuate/highlight lighten the matte
    color, shadow/trough darken it, per the *Modulate constants.
  */
  GetMagickPixelPacket(frame_image,&interior);
  SetMagickPixelPacket(frame_image,&image->border_color,(IndexPacket *) NULL,
    &interior);
  GetMagickPixelPacket(frame_image,&matte);
  matte.colorspace=sRGBColorspace;
  SetMagickPixelPacket(frame_image,&image->matte_color,(IndexPacket *) NULL,
    &matte);
  GetMagickPixelPacket(frame_image,&border);
  border.colorspace=sRGBColorspace;
  SetMagickPixelPacket(frame_image,&image->border_color,(IndexPacket *) NULL,
    &border);
  GetMagickPixelPacket(frame_image,&accentuate);
  accentuate.red=(MagickRealType) (QuantumScale*((QuantumRange-
    AccentuateModulate)*matte.red+(QuantumRange*AccentuateModulate)));
  accentuate.green=(MagickRealType) (QuantumScale*((QuantumRange-
    AccentuateModulate)*matte.green+(QuantumRange*AccentuateModulate)));
  accentuate.blue=(MagickRealType) (QuantumScale*((QuantumRange-
    AccentuateModulate)*matte.blue+(QuantumRange*AccentuateModulate)));
  accentuate.opacity=matte.opacity;
  GetMagickPixelPacket(frame_image,&highlight);
  highlight.red=(MagickRealType) (QuantumScale*((QuantumRange-
    HighlightModulate)*matte.red+(QuantumRange*HighlightModulate)));
  highlight.green=(MagickRealType) (QuantumScale*((QuantumRange-
    HighlightModulate)*matte.green+(QuantumRange*HighlightModulate)));
  highlight.blue=(MagickRealType) (QuantumScale*((QuantumRange-
    HighlightModulate)*matte.blue+(QuantumRange*HighlightModulate)));
  highlight.opacity=matte.opacity;
  GetMagickPixelPacket(frame_image,&shadow);
  shadow.red=QuantumScale*matte.red*ShadowModulate;
  shadow.green=QuantumScale*matte.green*ShadowModulate;
  shadow.blue=QuantumScale*matte.blue*ShadowModulate;
  shadow.opacity=matte.opacity;
  GetMagickPixelPacket(frame_image,&trough);
  trough.red=QuantumScale*matte.red*TroughModulate;
  trough.green=QuantumScale*matte.green*TroughModulate;
  trough.blue=QuantumScale*matte.blue*TroughModulate;
  trough.opacity=matte.opacity;
  if (image->colorspace == CMYKColorspace)
    {
      ConvertRGBToCMYK(&interior);
      ConvertRGBToCMYK(&matte);
      ConvertRGBToCMYK(&border);
      ConvertRGBToCMYK(&accentuate);
      ConvertRGBToCMYK(&highlight);
      ConvertRGBToCMYK(&shadow);
      ConvertRGBToCMYK(&trough);
    }
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  frame_view=AcquireAuthenticCacheView(frame_image,exception);
  /* Total height of the band above the image interior. */
  height=(size_t) (frame_info->outer_bevel+(frame_info->y-bevel_width)+
    frame_info->inner_bevel);
  if (height != 0)
    {
      register IndexPacket
        *restrict frame_indexes;

      register ssize_t
        x;

      register PixelPacket
        *restrict q;

      /*
        Draw top of ornamental border.  q and frame_indexes walk the
        queued region pixel by pixel in raster order; the per-row loops
        below must consume exactly frame_image->columns pixels per row.
      */
      q=QueueCacheViewAuthenticPixels(frame_view,0,0,frame_image->columns,
        height,exception);
      frame_indexes=GetCacheViewAuthenticIndexQueue(frame_view);
      if (q != (PixelPacket *) NULL)
        {
          /*
            Draw top of ornamental border.
          */
          for (y=0; y < (ssize_t) frame_info->outer_bevel; y++)
          {
            /* Outer bevel: highlight on the left diagonal, accentuate
               across the top, shadow on the right diagonal. */
            for (x=0; x < (ssize_t) (frame_image->columns-y); x++)
            {
              if (x < y)
                SetPixelPacket(frame_image,&highlight,q,frame_indexes);
              else
                SetPixelPacket(frame_image,&accentuate,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            for ( ; x < (ssize_t) frame_image->columns; x++)
            {
              SetPixelPacket(frame_image,&shadow,q,frame_indexes);
              q++;
              frame_indexes++;
            }
          }
          /* Flat matte band between the outer and inner bevels. */
          for (y=0; y < (ssize_t) (frame_info->y-bevel_width); y++)
          {
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelPacket(frame_image,&highlight,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            width=frame_image->columns-2*frame_info->outer_bevel;
            for (x=0; x < (ssize_t) width; x++)
            {
              SetPixelPacket(frame_image,&matte,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelPacket(frame_image,&shadow,q,frame_indexes);
              q++;
              frame_indexes++;
            }
          }
          /* Inner bevel above the image interior. */
          for (y=0; y < (ssize_t) frame_info->inner_bevel; y++)
          {
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelPacket(frame_image,&highlight,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
            {
              SetPixelPacket(frame_image,&matte,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            width=image->columns+((size_t) frame_info->inner_bevel << 1)-
              y;
            for (x=0; x < (ssize_t) width; x++)
            {
              if (x < y)
                SetPixelPacket(frame_image,&shadow,q,frame_indexes);
              else
                SetPixelPacket(frame_image,&trough,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            for ( ; x < (ssize_t) (image->columns+2*frame_info->inner_bevel); x++)
            {
              SetPixelPacket(frame_image,&highlight,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            width=frame_info->width-frame_info->x-image->columns-bevel_width;
            for (x=0; x < (ssize_t) width; x++)
            {
              SetPixelPacket(frame_image,&matte,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelPacket(frame_image,&shadow,q,frame_indexes);
              q++;
              frame_indexes++;
            }
          }
          (void) SyncCacheViewAuthenticPixels(frame_view,exception);
        }
    }
  /*
    Draw sides of ornamental border.  One scanline of the frame per
    iteration; iterations are independent, hence the parallel for.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,frame_image,1,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict frame_indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    /*
      Initialize scanline with matte color.
    */
    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(frame_view,0,frame_info->y+y,
      frame_image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    frame_indexes=GetCacheViewAuthenticIndexQueue(frame_view);
    for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
    {
      SetPixelPacket(frame_image,&highlight,q,frame_indexes);
      q++;
      frame_indexes++;
    }
    for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
    {
      SetPixelPacket(frame_image,&matte,q,frame_indexes);
      q++;
      frame_indexes++;
    }
    for (x=0; x < (ssize_t) frame_info->inner_bevel; x++)
    {
      SetPixelPacket(frame_image,&shadow,q,frame_indexes);
      q++;
      frame_indexes++;
    }
    /*
      Set frame interior to interior color.  When the image will later be
      composited over the frame (non-copy compose), fill with the border
      color; otherwise copy the source scanline directly.
    */
    if ((image->compose != CopyCompositeOp) &&
        ((image->compose != OverCompositeOp) || (image->matte != MagickFalse)))
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        SetPixelPacket(frame_image,&interior,q,frame_indexes);
        q++;
        frame_indexes++;
      }
    else
      {
        register const IndexPacket
          *indexes;

        register const PixelPacket
          *p;

        p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
        if (p == (const PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        indexes=GetCacheViewVirtualIndexQueue(image_view);
        (void) CopyMagickMemory(q,p,image->columns*sizeof(*p));
        if ((image->colorspace == CMYKColorspace) &&
            (frame_image->colorspace == CMYKColorspace))
          {
            (void) CopyMagickMemory(frame_indexes,indexes,image->columns*
              sizeof(*indexes));
            frame_indexes+=image->columns;
          }
        q+=image->columns;
      }
    for (x=0; x < (ssize_t) frame_info->inner_bevel; x++)
    {
      SetPixelPacket(frame_image,&highlight,q,frame_indexes);
      q++;
      frame_indexes++;
    }
    width=frame_info->width-frame_info->x-image->columns-bevel_width;
    for (x=0; x < (ssize_t) width; x++)
    {
      SetPixelPacket(frame_image,&matte,q,frame_indexes);
      q++;
      frame_indexes++;
    }
    for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
    {
      SetPixelPacket(frame_image,&shadow,q,frame_indexes);
      q++;
      frame_indexes++;
    }
    if (SyncCacheViewAuthenticPixels(frame_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Serialize progress updates across the worker threads. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_FrameImage)
#endif
        proceed=SetImageProgress(image,FrameImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /* Total height of the band below the image interior. */
  height=(size_t) (frame_info->inner_bevel+frame_info->height-
    frame_info->y-image->rows-bevel_width+frame_info->outer_bevel);
  if (height != 0)
    {
      register IndexPacket
        *restrict frame_indexes;

      register ssize_t
        x;

      register PixelPacket
        *restrict q;

      /*
        Draw bottom of ornamental border.
      */
      q=QueueCacheViewAuthenticPixels(frame_view,0,(ssize_t) (frame_image->rows-
        height),frame_image->columns,height,exception);
      if (q != (PixelPacket *) NULL)
        {
          /*
            Draw bottom of ornamental border (mirror of the top: inner
            bevel first, then the matte band, then the outer bevel).
          */
          frame_indexes=GetCacheViewAuthenticIndexQueue(frame_view);
          for (y=frame_info->inner_bevel-1; y >= 0; y--)
          {
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelPacket(frame_image,&highlight,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
            {
              SetPixelPacket(frame_image,&matte,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            for (x=0; x < y; x++)
            {
              SetPixelPacket(frame_image,&shadow,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            for ( ; x < (ssize_t) (image->columns+2*frame_info->inner_bevel); x++)
            {
              if (x >= (ssize_t) (image->columns+2*frame_info->inner_bevel-y))
                SetPixelPacket(frame_image,&highlight,q,frame_indexes);
              else
                SetPixelPacket(frame_image,&accentuate,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            width=frame_info->width-frame_info->x-image->columns-bevel_width;
            for (x=0; x < (ssize_t) width; x++)
            {
              SetPixelPacket(frame_image,&matte,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelPacket(frame_image,&shadow,q,frame_indexes);
              q++;
              frame_indexes++;
            }
          }
          height=frame_info->height-frame_info->y-image->rows-bevel_width;
          for (y=0; y < (ssize_t) height; y++)
          {
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelPacket(frame_image,&highlight,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            width=frame_image->columns-2*frame_info->outer_bevel;
            for (x=0; x < (ssize_t) width; x++)
            {
              SetPixelPacket(frame_image,&matte,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelPacket(frame_image,&shadow,q,frame_indexes);
              q++;
              frame_indexes++;
            }
          }
          for (y=frame_info->outer_bevel-1; y >= 0; y--)
          {
            for (x=0; x < y; x++)
            {
              SetPixelPacket(frame_image,&highlight,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            for ( ; x < (ssize_t) frame_image->columns; x++)
            {
              if (x >= (ssize_t) (frame_image->columns-y))
                SetPixelPacket(frame_image,&shadow,q,frame_indexes);
              else
                SetPixelPacket(frame_image,&trough,q,frame_indexes);
              q++;
              frame_indexes++;
            }
          }
          (void) SyncCacheViewAuthenticPixels(frame_view,exception);
        }
    }
  frame_view=DestroyCacheView(frame_view);
  image_view=DestroyCacheView(image_view);
  /* If the interior was filled with border color, composite the source
     image into the frame opening now. */
  if ((image->compose != CopyCompositeOp) &&
      ((image->compose != OverCompositeOp) || (image->matte != MagickFalse)))
    {
      x=(ssize_t) (frame_info->outer_bevel+(frame_info->x-bevel_width)+
        frame_info->inner_bevel);
      y=(ssize_t) (frame_info->outer_bevel+(frame_info->y-bevel_width)+
        frame_info->inner_bevel);
      (void) CompositeImage(frame_image,image->compose,image,x,y);
    }
  if (status == MagickFalse)
    frame_image=DestroyImage(frame_image);
  return(frame_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R a i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RaiseImage() creates a simulated three-dimensional button-like effect
% by lightening and darkening the edges of the image. Members width and
% height of raise_info define the width of the vertical and horizontal
% edge of the effect.
%
% The format of the RaiseImage method is:
%
% MagickBooleanType RaiseImage(const Image *image,
% const RectangleInfo *raise_info,const MagickBooleanType raise)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o raise_info: Define the width and height of the raise area.
%
% o raise: A value other than zero creates a 3-D raise effect,
% otherwise it has a lowered effect.
%
*/
/*
  RaiseImage(): simulate a 3-D button by blending the image border toward
  white or black.  The frame is processed in three horizontal bands (top
  bevel, middle rows, bottom bevel), each parallelized row-wise.
*/
MagickExport MagickBooleanType RaiseImage(Image *image,
  const RectangleInfo *raise_info,const MagickBooleanType raise)
{
#define AccentuateFactor ScaleCharToQuantum(135)
#define HighlightFactor ScaleCharToQuantum(190)
#define ShadowFactor ScaleCharToQuantum(190)
#define RaiseImageTag "Raise/Image"
#define TroughFactor ScaleCharToQuantum(135)

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  Quantum
    foreground,
    background;

  ssize_t
    y;

  /*
    Validate arguments: the bevel must fit twice in each image dimension
    (left+right, top+bottom).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(raise_info != (RectangleInfo *) NULL);
  if ((image->columns <= (raise_info->width << 1)) ||
      (image->rows <= (raise_info->height << 1)))
    ThrowBinaryException(OptionError,"ImageSizeMustExceedBevelWidth",
      image->filename);
  /*
    Raised: top/left edges blend toward white, bottom/right toward black;
    a lowered (raise == MagickFalse) effect swaps the two.
  */
  foreground=QuantumRange;
  background=(Quantum) 0;
  if (raise == MagickFalse)
    {
      foreground=(Quantum) 0;
      background=QuantumRange;
    }
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /*
    Raise image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
  /*
    Top bevel rows: left wedge highlighted, center accentuated, right wedge
    shadowed.  The wedge width follows y, so the corners are mitered along
    the diagonal.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,1,1)
#endif
  for (y=0; y < (ssize_t) raise_info->height; y++)
  {
    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* left miter: blend toward foreground using the highlight factor */
    for (x=0; x < y; x++)
    {
      SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelRed(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelGreen(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelBlue(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      q++;
    }
    /* top edge proper: stronger blend toward foreground (accentuate) */
    for ( ; x < (ssize_t) (image->columns-y); x++)
    {
      SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelRed(q)*AccentuateFactor+(MagickRealType) foreground*
        (QuantumRange-AccentuateFactor))));
      SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelGreen(q)*AccentuateFactor+(MagickRealType) foreground*
        (QuantumRange-AccentuateFactor))));
      SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelBlue(q)*AccentuateFactor+(MagickRealType) foreground*
        (QuantumRange-AccentuateFactor))));
      q++;
    }
    /* right miter: blend toward background (shadow) */
    for ( ; x < (ssize_t) image->columns; x++)
    {
      SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelRed(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelGreen(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelBlue(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress++ and the callback are serialized across threads */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_RaiseImage)
#endif
        proceed=SetImageProgress(image,RaiseImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /*
    Middle rows: only the left (highlight) and right (shadow) vertical edges
    are modified; the interior run merely advances q without touching pixels.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,1,1)
#endif
  for (y=(ssize_t) raise_info->height; y < (ssize_t) (image->rows-raise_info->height); y++)
  {
    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* left edge: highlight */
    for (x=0; x < (ssize_t) raise_info->width; x++)
    {
      SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelRed(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelGreen(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelBlue(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      q++;
    }
    /* interior pixels: unchanged, just skip over them */
    for ( ; x < (ssize_t) (image->columns-raise_info->width); x++)
      q++;
    /* right edge: shadow */
    for ( ; x < (ssize_t) image->columns; x++)
    {
      SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelRed(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelGreen(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelBlue(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_RaiseImage)
#endif
        proceed=SetImageProgress(image,RaiseImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /*
    Bottom bevel rows: left wedge highlighted, bottom edge troughed toward
    background, right wedge shadowed.  The wedge width (image->rows-y)
    shrinks toward the last row, mirroring the top-band miters.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,1,1)
#endif
  for (y=(ssize_t) (image->rows-raise_info->height); y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* left miter: highlight */
    for (x=0; x < (ssize_t) (image->rows-y); x++)
    {
      SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelRed(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelGreen(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelBlue(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      q++;
    }
    /* bottom edge proper: blend toward background (trough) */
    for ( ; x < (ssize_t) (image->columns-(image->rows-y)); x++)
    {
      SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelRed(q)*TroughFactor+(MagickRealType) background*
        (QuantumRange-TroughFactor))));
      SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelGreen(q)*TroughFactor+(MagickRealType) background*
        (QuantumRange-TroughFactor))));
      SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelBlue(q)*TroughFactor+(MagickRealType) background*
        (QuantumRange-TroughFactor))));
      q++;
    }
    /* right miter: shadow */
    for ( ; x < (ssize_t) image->columns; x++)
    {
      SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelRed(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelGreen(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelBlue(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_RaiseImage)
#endif
        proceed=SetImageProgress(image,RaiseImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
|
GrB_Matrix_nvals.c | //------------------------------------------------------------------------------
// GrB_Matrix_nvals: number of entries in a sparse matrix
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
#include "GB.h"
GrB_Info GrB_Matrix_nvals           // get the number of entries in a matrix
(
    GrB_Index *nvals,               // matrix has nvals entries
    const GrB_Matrix A              // matrix to query
)
{

    //--------------------------------------------------------------------------
    // validate inputs
    //--------------------------------------------------------------------------

    GB_WHERE1 ("GrB_Matrix_nvals (&nvals, A)") ;
    GB_BURBLE_START ("GrB_Matrix_nvals") ;
    GB_RETURN_IF_NULL_OR_FAULTY (A) ;

    //--------------------------------------------------------------------------
    // delegate the count to GB_nvals and hand back its status
    //--------------------------------------------------------------------------

    GrB_Info result = GB_nvals (nvals, A, Context) ;
    GB_BURBLE_END ;
    // make the result visible to all threads before returning
    #pragma omp flush
    return (result) ;
}
|
omp-task-fp.c | #include <stdio.h>
int main() {
int x = 10, i = 0;
#pragma omp parallel
{
#pragma omp single
{
#pragma omp task
{
x = x + 1;
printf("x = %d\n", x);
}
#pragma omp taskwait
int j = 0;
for(i = 0; i < 4; i++)
{
#pragma omp task firstprivate(i, j)
{
j += i;
printf("x (%d) = %d\n", i, x + i );
printf("j (%d) = %d\n", i, j );
}
}
#pragma omp taskwait
printf(" j = %d\n",j);
}
}
printf("final x = %d\n", x);
return 0;
}
|
softmax_hcl_arm.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2020, OPEN AI LAB
* Author: haoluo@openailab.com
*/
#include <math.h>
#include <arm_neon.h>
#include "sys_port.h"
#include "module.h"
#include "tengine_errno.h"
#include "tengine_log.h"
#include "tengine_ir.h"
#include "../../cpu_node_ops.h"
#include "tengine_op.h"
#include "softmax_param.h"
/* Propagate the input tensor's shape to the output tensor when the first
 * four dimensions differ; returns 0 on success or the status from
 * set_ir_tensor_shape(). */
static int reshape(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct ir_node* node = exec_node->ir_node;
    struct ir_graph* graph = node->graph;
    struct ir_tensor* in = get_ir_graph_tensor(graph, node->input_tensors[0]);
    struct ir_tensor* out = get_ir_graph_tensor(graph, node->output_tensors[0]);

    /* compare dims[0..3] element by element */
    int shape_differs = 0;
    for (int k = 0; k < 4; k++)
    {
        if (in->dims[k] != out->dims[k])
        {
            shape_differs = 1;
            break;
        }
    }

    if (!shape_differs)
        return 0;

    return set_ir_tensor_shape(out, in->dims, in->dim_num);
}
/* Approximate exp(x) per lane via the limit form (1 + x/2^10)^(2^10):
 * one multiply-add to form 1 + x/1024, then ten successive squarings. */
static inline float32x4_t vexpq10_f32(float32x4_t x)
{
    float32x4_t acc = vmlaq_n_f32(vdupq_n_f32(1.0f), x, 0.0009765625f); /* 1 + x/1024 (n = 10) */
    for (int s = 0; s < 10; ++s)
        acc = vmulq_f32(acc, acc);
    return acc;
}
/* Compute, for each of the in_size inner positions, the maximum over the
 * on_size axis of input (viewed as [on_size][in_size]) into array.
 *
 * Fixes two defects in the previous version:
 *  - the NEON paths used vpmaxq_f32 (aarch64) / a vrev64q+vextq combination
 *    (armv7), both of which compute *pairwise* maxima and mix lanes; an
 *    element-wise running maximum requires vmaxq_f32.
 *  - the accumulator was zero-seeded, so an all-negative input produced a
 *    wrong maximum of 0; it is now seeded with the first slice.
 *
 * num_thread is kept for interface compatibility (the OpenMP pragmas are
 * currently disabled upstream). */
static void GetMaxArray(float* input, float* array, int in_size, int on_size, int num_thread)
{
    float* input_ptr = ( float* )input;
    float* array_ptr = ( float* )array;

    if (on_size <= 0)
    {
        /* nothing to reduce over; keep the old zero-fill behavior */
        memset(array, 0, in_size * sizeof(float));
        return;
    }

    /* seed with the first slice so the result is a true maximum */
    memcpy(array_ptr, input_ptr, in_size * sizeof(float));

    for (int j = 1; j < on_size; j++)
    {
        /* vectorized body: 4 lanes at a time, element-wise max */
        for (int i = 0; i < (in_size & -4); i += 4)
        {
            float32x4_t _p = vld1q_f32(array_ptr + i);
            float32x4_t _in = vld1q_f32(input_ptr + j * in_size + i);
            _p = vmaxq_f32(_p, _in);
            vst1q_f32(array_ptr + i, _p);
        }
        /* scalar tail for the remaining in_size % 4 positions */
        for (int i = in_size & ~3; i < in_size; i++)
        {
            if (array_ptr[i] < input_ptr[j * in_size + i])
                array_ptr[i] = input_ptr[j * in_size + i];
        }
    }
}
/* Compute output = exp(input - max) and the per-position sums, then
 * normalize so each column sums to 1.  input/output are viewed as
 * [on_size][in_size]; softmax is taken along the on_size axis
 * independently for each of the in_size inner positions, using maxarray
 * (from GetMaxArray) for numerical stability. */
static void GetOutResult(float* input, float* output, float* maxarray, float* sum_array, int in_size, int on_size,
                         int num_thread)
{
    float* input_ptr = ( float* )input;
    float* output_ptr = ( float* )output;
    float* maxarray_ptr = ( float* )maxarray;
    float* sum_array_ptr = ( float* )sum_array;
    memset(sum_array, 0x0, in_size * sizeof(float));
    /* get the exp and the summary */
    // #pragma omp parallel for num_threads(num_thread)
    for (int j = 0; j < on_size; j++)
    {
        // #pragma omp parallel for num_threads(num_thread)
        /* vectorized body: 4 inner positions per iteration.
           NOTE(review): this path uses the vexpq10_f32 approximation while
           the scalar tail below calls libm exp(); the two paths produce
           slightly different values -- confirm the tolerance is acceptable. */
        for (int i = 0; i < (in_size & -4); i += 4)
        {
            int index = j * in_size + i;
            float32x4_t out = vexpq10_f32(vsubq_f32(vld1q_f32(input_ptr + index), vld1q_f32(maxarray_ptr + i)));
            float32x4_t sum = vaddq_f32(vld1q_f32(sum_array_ptr + i), out);
            vst1q_f32(output_ptr + index, out);
            vst1q_f32(sum_array_ptr + i, sum);
        }
        /* scalar tail for the remaining in_size % 4 positions */
        for (int i = in_size & ~3; i < in_size; i++)
        {
            int index = j * in_size + i;
            output_ptr[index] = exp(input_ptr[index] - maxarray_ptr[i]);
            sum_array_ptr[i] += output_ptr[index];
        }
    }
    /*
    for(int l = 0; l < in_size; l++)
    {
        int index = j * in_size + l;
        output_ptr[index] = exp(input_ptr[index] - array_ptr[l]);
        sum_array_ptr[l] += output_ptr[index];
    }
    */
    /* the final result: divide each exponential by its column sum */
    for (int j = 0; j < on_size; j++)
        for (int l = 0; l < in_size; l++)
        {
            int index = j * in_size + l;
            output_ptr[index] /= sum_array_ptr[l];
        }
}
/* No per-node state to allocate for this softmax implementation. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}
/* Nothing allocated in init_node, so nothing to release. */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}
/* No pre-run preparation (buffers, weight repacking) needed for softmax. */
static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}
/* Forward pass: softmax of the FP32 input along the configured axis.
 * The tensor is treated as [out_size][on_size][in_size], where on_size is
 * the softmax axis; GetMaxArray/GetOutResult are invoked once per outer
 * slice.  Returns 0 on success. */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct ir_node* ir_node = exec_node->ir_node;
    struct ir_graph* ir_graph = ir_node->graph;
    struct ir_tensor* input_tensor;
    struct ir_tensor* output_tensor;

    input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);
    struct softmax_param* softmax_param = ( struct softmax_param* )ir_node->op.param_mem;

    int element_size = input_tensor->elem_size;
    /* NOTE(review): dims holds at most 4 entries but dim_num is not checked
       here -- confirm callers never pass tensors with more dimensions. */
    int dims[4];
    for (int i = 0; i < input_tensor->dim_num; i++)
    {
        dims[i] = input_tensor->dims[i];
    }
    int axis = softmax_param->axis;
    int out_size, in_size, on_size;
    /* out_size: product of dimensions before the softmax axis */
    out_size = 1;
    for (int i = 0; i < axis; i++)
    {
        out_size *= dims[i];
    }
    /* in_size: product of dimensions after the softmax axis */
    in_size = 1;
    for (size_t i = axis + 1; i < input_tensor->dim_num; i++)
    {
        in_size *= dims[i];
    }
    on_size = dims[axis];

    uint8_t* input = input_tensor->data;
    uint8_t* output = output_tensor->data;
    /* scratch buffers shared across all outer slices */
    float* max_array = ( float* )malloc(in_size * sizeof(float));
    float* sum_array = ( float* )malloc(in_size * sizeof(float));

    int on_in_size = on_size * in_size;
    float* input_f = NULL;
    float* output_f = NULL;
    /* quantized (uint8) path is not implemented; score() rejects non-FP32
       tensors, so this branch is currently a dead placeholder */
    if (element_size == 1)
    {
        input_f = ( float* )malloc(on_in_size * 4);
        output_f = ( float* )malloc(on_in_size * 4);
        /* todo */
        free(input_f);
        free(output_f);
    }
    for (int i = 0; i < out_size; i++)
    {
        /* get max */
        int img_base = i * on_in_size * element_size;
        GetMaxArray(( float* )(input + img_base), max_array, in_size, on_size, exec_graph->num_thread);
        GetOutResult(( float* )(input + img_base), ( float* )(output + img_base), max_array, sum_array, in_size,
                     on_size, exec_graph->num_thread);
    }
    free(max_array);
    free(sum_array);
    return 0;
}
/* Backend selection score: claim this node only when the input is FP32
 * (the quantized path is not implemented yet). */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct ir_node* exec_node)
{
    struct ir_graph* graph = exec_node->graph;
    struct ir_tensor* input_tensor = get_ir_graph_tensor(graph, exec_node->input_tensors[0]);

    /* todo support uint8 */
    return (input_tensor->data_type == TENGINE_DT_FP32) ? OPS_SCORE_BEST : 0;
}
/* Operator dispatch table for the HCL (ARM NEON) softmax implementation. */
static struct node_ops hcl_node_ops = {.prerun = prerun,
                                       .run = run,
                                       .reshape = reshape,
                                       .postrun = NULL,
                                       .init_node = init_node,
                                       .release_node = release_node,
                                       .score = score};

/* Register this implementation for OP_SOFTMAX (invoked at library load). */
static int reg_softmax_hcl_ops(void* arg)
{
    return register_builtin_node_ops(OP_SOFTMAX, &hcl_node_ops);
}

/* Undo the registration above (invoked at library unload). */
static int unreg_softmax_hcl_ops(void* arg)
{
    return unregister_builtin_node_ops(OP_SOFTMAX, &hcl_node_ops);
}

AUTO_REGISTER_OPS(reg_softmax_hcl_ops);
AUTO_UNREGISTER_OPS(unreg_softmax_hcl_ops);
|
vlad.c | /** @file vlad.c
** @brief VLAD - Declaration
** @author David Novotny
** @author Andrea Vedaldi
**/
/*
Copyright (C) 2013 David Novotny and Andrea Vedaldi.
All rights reserved.
This file is part of the VLFeat library and is made available under
the terms of the BSD license (see the COPYING file).
*/
/**
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@page vlad Vector of Locally Aggregated Descriptors (VLAD) encoding
@author David Novotny
@author Andrea Vedaldi
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@ref vlad.h implements the *Vector of Linearly Aggregated Descriptors*
(VLAD) image representation @cite{jegou10aggregating}
@cite{arandjelovic13all-about}.
@ref vlad-starting demonstrates how to use the C API to compute the
VLAD representation of an image. For further details on the VLAD image
representation refer to:
- @subpage vlad-fundamentals - VLAD definition and computation.
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@section vlad-starting Getting started with VLAD
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
The VLAD encoding of a set of features is obtained by using the
function ::vl_vlad_encode. The function can be applied to both @c
float or @c double data types.
::vl_vlad_encode requires a visual dictionary, for example obtained by
using @ref kmeans. Furthermore, the assignments of features to
dictionary elements must be pre-computed, for example by using @ref
kdtree.
In the following example code, the vocabulary is first created using
the KMeans clustering, then the points, that are to be encoded are
assigned to its corresponding nearest vocabulary words, after that the
original vlad encoding routine without any normalization option takes place.
At the end of the process the encoding is stored in the @c enc variable.
@code
vl_uint32 * indexes;
float * assignments;
float * enc
int i;
// create a KMeans object and run clustering to get vocabulary words (centers)
kmeans = vl_kmeans_new (VLDistanceL2, VL_TYPE_FLOAT) ;
vl_kmeans_cluster (kmeans,
data,
dimension,
numData,
numCenters) ;
// find nearest cluster centers for the data that should be encoded
indexes = vl_malloc(sizeof(vl_uint32) * numDataToEncode);
vl_kmeans_quantize(kmeans,indexes,dataToEncode,numDataToEncode);
// convert indexes array to assignments array,
// which can be processed by vl_vlad_encode
assignments = vl_malloc(sizeof(float) * numDataToEncode * numCenters);
memset(assignments, 0, sizeof(float) * numDataToEncode * numCenters);
for(i = 0; i < numDataToEncode; i++) {
assignments[i * numCenters + indexes[i]] = 1.;
}
// allocate space for vlad encoding
enc = vl_malloc(sizeof(TYPE) * dimension * numCenters);
// do the encoding job
vl_vlad_encode (enc, VL_F_TYPE,
vl_kmeans_get_centers(kmeans), dimension, numCenters,
data, numData,
assignments,
0) ;
@endcode
Various @ref vlad-normalization normalizations can be applied to the
VLAD vectors. These are controlled by the parameter @a flag of
::vl_vlad_encode.
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@page vlad-fundamentals VLAD fundamentals
@tableofcontents
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
This page describes the *Vector of Locally Aggregated Descriptors*
(VLAD) image encoding of @cite{jegou10aggregating}. See @ref vlad for
an overview of the C API.
VLAD is a *feature encoding and pooling* method, similar to @ref
fisher "Fisher vectors". VLAD encodes a set of local feature
descriptors $I=(\bx_1,\dots,\bx_n)$ extracted from an image using a
dictionary built using a clustering method such as @ref gmm or @ref
kmeans. Let $q_{ik}$ be the strength of the association of data vector
$\bx_i$ to cluster $\mu_k$, such that $q_{ik} \geq 0$ and
$\sum_{k=1}^K q_{ik} = 1$. The association may be either soft
(e.g. obtained as the posterior probabilities of the GMM clusters) or
hard (e.g. obtained by vector quantization with K-means).
$\mu_k$ are the cluster *means*, vectors of the same dimension as the
data $\bx_i$. VLAD encodes feature $\bx$ by considering the *residuals*
\[
\bv_k = \sum_{i=1}^{N} q_{ik} (\bx_{i} - \mu_k).
\]
The residuals are stacked together to obtain the vector
\[
\hat\Phi(I) =
\begin{bmatrix}
\vdots \\
\bv_k \\
\vdots
\end{bmatrix}
\]
Before the VLAD encoding is used it is usually normalized, as
explained @ref vlad-normalization next.
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@section vlad-normalization VLAD normalization
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
VLFeat VLAD implementation supports a number of different
normalization strategies. These are optionally applied in this order:
- **Component-wise mass normalization.** Each vector $\bv_k$ is
divided by the total mass of features associated to it $\sum_{i=1}^N
q_{ik}$.
- **Square-rooting.** The function $\sign(z)\sqrt{|z|}$ is applied to
all scalar components of the VLAD descriptor.
- **Component-wise $l^2$ normalization.** The vectors $\bv_k$ are
divided by their norm $\|\bv_k\|_2$.
- **Global $l^2$ normalization.** The VLAD descriptor $\hat\Phi(I)$ is
divided by its norm $\|\hat\Phi(I)\|_2$.
*/
#include "vlad.h"
#include "mathop.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
/* ================================================================ */
#ifdef VL_VLAD_INSTANTIATING
/* Type-generic VLAD encoder, instantiated by the self-include below for
 * float (SFX=f) and double (SFX=d).  For each cluster: accumulate the
 * assignment-weighted sum of the data, subtract the cluster mean scaled by
 * the cluster mass, then apply the normalizations selected in flags.
 * Clusters are processed independently, hence the per-cluster OpenMP loop. */
static void
VL_XCAT(_vl_vlad_encode_, SFX)
(TYPE * enc,
 TYPE const * means, vl_size dimension, vl_size numClusters,
 TYPE const * data, vl_size numData,
 TYPE const * assignments,
 int flags)
{
  vl_uindex dim ;
  vl_index i_cl=0, i_d ;

  memset(enc, 0, sizeof(TYPE) * dimension * numClusters) ;

#if defined(_OPENMP)
#pragma omp parallel for default(shared) private(i_cl,i_d,dim) num_threads(vl_get_max_threads())
#endif
  for (i_cl = 0; i_cl < (signed)numClusters; i_cl++) {
    double clusterMass = 0 ;
    /* accumulate sum_i q_ik * x_i and the cluster mass sum_i q_ik;
       zero assignments are skipped entirely */
    for (i_d = 0; i_d < (signed)numData; i_d++) {
      if (assignments[i_d*numClusters + i_cl] > 0) {
        double q = assignments[i_d*numClusters+i_cl] ;
        clusterMass += q ;
        for(dim = 0; dim < dimension; dim++) {
          enc [i_cl * dimension + dim] += q * data [i_d * dimension + dim] ;
        }
      }
    }

    /* turn the weighted sum into residuals; with mass normalization the
       mean is subtracted once, otherwise it is scaled by the mass so the
       result equals sum_i q_ik (x_i - mu_k) in both cases */
    if (clusterMass > 0) {
      if (flags & VL_VLAD_FLAG_NORMALIZE_MASS) {
        for(dim = 0; dim < dimension; dim++) {
          enc[i_cl*dimension + dim] /= clusterMass ;
          enc[i_cl*dimension + dim] -= means[i_cl*dimension+dim];
        }
      } else {
        for(dim = 0; dim < dimension; dim++) {
          enc[i_cl*dimension + dim] -= clusterMass * means[i_cl*dimension+dim];
        }
      }
    }

    /* signed square-rooting: sign(z) * sqrt(|z|) */
    if (flags & VL_VLAD_FLAG_SQUARE_ROOT) {
      for(dim = 0; dim < dimension; dim++) {
        TYPE z = enc[i_cl*dimension + dim] ;
        if (z >= 0) {
          enc[i_cl*dimension + dim] = VL_XCAT(vl_sqrt_, SFX)(z) ;
        } else {
          enc[i_cl*dimension + dim] = - VL_XCAT(vl_sqrt_, SFX)(- z) ;
        }
      }
    }

    /* per-cluster l2 normalization (norm clamped away from zero) */
    if (flags & VL_VLAD_FLAG_NORMALIZE_COMPONENTS) {
      TYPE n = 0 ;
      dim = 0 ;
      for(dim = 0; dim < dimension; dim++) {
        TYPE z = enc[i_cl*dimension + dim] ;
        n += z * z ;
      }
      n = VL_XCAT(vl_sqrt_, SFX)(n) ;
      n = VL_MAX(n, 1e-12) ;
      for(dim = 0; dim < dimension; dim++) {
        enc[i_cl*dimension + dim] /= n ;
      }
    }
  }

  /* global l2 normalization over the whole descriptor (default; disabled
     by VL_VLAD_FLAG_UNNORMALIZED) -- runs after the parallel region */
  if (! (flags & VL_VLAD_FLAG_UNNORMALIZED)) {
    TYPE n = 0 ;
    for(dim = 0 ; dim < dimension * numClusters ; dim++) {
      TYPE z = enc [dim] ;
      n += z * z ;
    }
    n = VL_XCAT(vl_sqrt_, SFX)(n) ;
    n = VL_MAX(n, 1e-12) ;
    for(dim = 0 ; dim < dimension * numClusters ; dim++) {
      enc[dim] /= n ;
    }
  }
}
/* VL_FISHER_INSTANTIATING */
#else
#ifndef __DOXYGEN__
#define FLT VL_TYPE_FLOAT
#define TYPE float
#define SFX f
#define VL_VLAD_INSTANTIATING
#include "vlad.c"
#define FLT VL_TYPE_DOUBLE
#define TYPE double
#define SFX d
#define VL_VLAD_INSTANTIATING
#include "vlad.c"
#endif
/* VL_VLAD_INSTANTIATING */
#endif
/* ================================================================ */
#ifndef VL_VLAD_INSTANTIATING
/** @brief VLAD encoding of a set of vectors.
** @param enc output VLAD encoding (out).
** @param dataType the type of the input data (::VL_TYPE_DOUBLE or ::VL_TYPE_FLOAT).
** @param numData number of data vectors to encode.
** @param means cluster means.
** @param numClusters number of clusters.
** @param data the data vectors to encode.
** @param dimension dimensionality of the data.
** @param assignments data to cluster soft assignments.
** @param flags options.
**
** @a enc is the VLAD vector of size @a numClusters by
** @a dimension. @a means is a matrix with @a numClusters columns and
** @a dimension rows. @a data is the matrix of vectors to be encoded,
** with @a dimension rows and @a numData columns. @a assignments is a
** matrix with @a numClusters rows and @a numData columns.
** All the matrices should be stored in column-major order.
**
** @a flag allows controlling further options:
** ::VL_VLAD_FLAG_NORMALIZE_COMPONENTS, ::VL_VLAD_FLAG_SQUARE_ROOT,
** ::VL_VLAD_FLAG_UNNORMALIZED, and ::VL_VLAD_FLAG_NORMALIZE_MASS.
**
** @sa @ref vlad
**/
void
vl_vlad_encode (void * enc, vl_type dataType,
                void const * means, vl_size dimension, vl_size numClusters,
                void const * data, vl_size numData,
                void const * assignments,
                int flags)
{
  /* Dispatch to the precision-specific instantiation; any other data type
     is a programming error. */
  if (dataType == VL_TYPE_FLOAT) {
    _vl_vlad_encode_f ((float *) enc,
                       (float const *) means, dimension, numClusters,
                       (float const *) data, numData,
                       (float const *) assignments, flags) ;
  } else if (dataType == VL_TYPE_DOUBLE) {
    _vl_vlad_encode_d ((double *) enc,
                       (double const *) means, dimension, numClusters,
                       (double const *) data, numData,
                       (double const *) assignments, flags) ;
  } else {
    abort() ;
  }
}
/* ! VL_VLAD_INSTANTIATING */
#endif
#undef SFX
#undef TYPE
#undef FLT
#undef VL_VLAD_INSTANTIATING
|
omp_SH_to_spat.gen.c | /*
* Copyright (c) 2010-2015 Centre National de la Recherche Scientifique.
* written by Nathanael Schaeffer (CNRS, ISTerre, Grenoble, France).
*
* nathanael.schaeffer@ujf-grenoble.fr
*
* This software is governed by the CeCILL license under French law and
* abiding by the rules of distribution of free software. You can use,
* modify and/or redistribute the software under the terms of the CeCILL
* license as circulated by CEA, CNRS and INRIA at the following URL
* "http://www.cecill.info".
*
* The fact that you are presently reading this means that you have had
* knowledge of the CeCILL license and that you accept its terms.
*
*/
# This file is meta-code for SHT.c (spherical harmonic transform).
# it is intended for "make" to generate C code for similar SHT functions,
# from one generic function + tags.
# > See Makefile and SHT.c
# Basically, there are tags at the beginning of lines that are information
# to keep or remove the line depending on the function to build.
# tags :
# Q : line for scalar transform
# V : line for vector transform (both spheroidal and toroidal)
# S : line for vector transform, spheroidal component
# T : line for vector transform, toroidal component.
static
3 void GEN3(_sy3,NWAY,SUFFIX)(shtns_cfg shtns, cplx *Qlm, cplx *Slm, cplx *Tlm, v2d *BrF, v2d *BtF, v2d *BpF, const long int llim, const int imlim) {
QX void GEN3(_sy1,NWAY,SUFFIX)(shtns_cfg shtns, cplx *Qlm, v2d *BrF, const long int llim, const int imlim) {
#ifndef SHT_GRAD
VX void GEN3(_sy2,NWAY,SUFFIX)(shtns_cfg shtns, cplx *Slm, cplx *Tlm, v2d *BtF, v2d *BpF, const long int llim, const int imlim) {
#else
S void GEN3(_sy1s,NWAY,SUFFIX)(shtns_cfg shtns, cplx *Slm, v2d *BtF, v2d *BpF, const long int llim, const int imlim) {
T void GEN3(_sy1t,NWAY,SUFFIX)(shtns_cfg shtns, cplx *Tlm, v2d *BtF, v2d *BpF, const long int llim, const int imlim) {
#endif
#ifndef SHT_AXISYM
Q #define BR0(i) ((double *)BrF)[2*(i)]
V #define BT0(i) ((double *)BtF)[2*(i)]
V #define BP0(i) ((double *)BpF)[2*(i)]
Q #define qr(l) vall(creal(Ql[l]))
Q #define qi(l) vall(cimag(Ql[l]))
S #define sr(l) vall(creal(Sl[l]))
S #define si(l) vall(cimag(Sl[l]))
T #define tr(l) vall(creal(Tl[l]))
T #define ti(l) vall(cimag(Tl[l]))
V double m_1;
unsigned im;
#else
Q #define BR0(i) ((double *)BrF)[i]
S #define BT0(i) ((double *)BtF)[i]
T #define BP0(i) ((double *)BpF)[i]
#endif
unsigned m0, mstep;
long int nk,k,l,m;
double *alm, *al;
double *ct, *st;
Q double Ql0[llim+1];
S double Sl0[llim];
T double Tl0[llim];
ct = shtns->ct; st = shtns->st;
nk = NLAT_2;
#if _GCC_VEC_
nk = ((unsigned)(nk+VSIZE2-1)) / VSIZE2;
#endif
#ifndef _OPENMP
m0 = 0; mstep = 1;
#else
m0 = omp_get_thread_num();
mstep = omp_get_num_threads();
if (m0 == 0)
#endif
{ // im=0;
#ifdef SHT_GRAD
#ifndef SHT_AXISYM
#ifdef _GCC_VEC_
S k=0; do { BpF[k]=vdup(0.0); } while(++k<NLAT_2);
T k=0; do { BtF[k]=vdup(0.0); } while(++k<NLAT_2);
#else
S k=0; do { BpF[k]=vdup(0.0); } while(++k<NLAT);
T k=0; do { BtF[k]=vdup(0.0); } while(++k<NLAT);
#endif
#else
S if (BpF != NULL) { int k=0; do { BpF[k]=vdup(0.0); } while(++k<NLAT_2); }
T if (BtF != NULL) { int k=0; do { BtF[k]=vdup(0.0); } while(++k<NLAT_2); }
#endif
#endif
l=1;
alm = shtns->alm;
Q Ql0[0] = (double) Qlm[0]; // l=0
do { // for m=0, compress the complex Q,S,T to double
Q Ql0[l] = (double) Qlm[l]; // Ql[l+1] = (double) Qlm[l+1];
S Sl0[l-1] = (double) Slm[l]; // Sl[l] = (double) Slm[l+1];
T Tl0[l-1] = (double) Tlm[l]; // Tl[l] = (double) Tlm[l+1];
++l;
} while(l<=llim);
k=0;
do {
l=0; al = alm;
rnd cost[NWAY], y0[NWAY], y1[NWAY];
V rnd sint[NWAY], dy0[NWAY], dy1[NWAY];
Q rnd re[NWAY], ro[NWAY];
S rnd te[NWAY], to[NWAY];
T rnd pe[NWAY], po[NWAY];
for (int j=0; j<NWAY; ++j) {
cost[j] = vread(ct, j+k);
V sint[j] = -vread(st, j+k);
y0[j] = vall(al[0]);
V dy0[j] = vall(0.0);
Q re[j] = y0[j] * vall(Ql0[0]);
S to[j] = dy0[j];
T po[j] = dy0[j];
}
for (int j=0; j<NWAY; ++j) {
y1[j] = vall(al[0]*al[1]) * cost[j];
V dy1[j] = vall(al[0]*al[1]) * sint[j];
}
for (int j=0; j<NWAY; ++j) {
Q ro[j] = y1[j] * vall(Ql0[1]);
S te[j] = dy1[j] * vall(Sl0[0]);
T pe[j] = -dy1[j] * vall(Tl0[0]);
}
al+=2; l+=2;
while(l<llim) {
for (int j=0; j<NWAY; ++j) {
V dy0[j] = vall(al[1])*(cost[j]*dy1[j] + y1[j]*sint[j]) + vall(al[0])*dy0[j];
y0[j] = vall(al[1])*(cost[j]*y1[j]) + vall(al[0])*y0[j];
}
for (int j=0; j<NWAY; ++j) {
Q re[j] += y0[j] * vall(Ql0[l]);
S to[j] += dy0[j] * vall(Sl0[l-1]);
T po[j] -= dy0[j] * vall(Tl0[l-1]);
}
for (int j=0; j<NWAY; ++j) {
V dy1[j] = vall(al[3])*(cost[j]*dy0[j] + y0[j]*sint[j]) + vall(al[2])*dy1[j];
y1[j] = vall(al[3])*(cost[j]*y0[j]) + vall(al[2])*y1[j];
}
for (int j=0; j<NWAY; ++j) {
Q ro[j] += y1[j] * vall(Ql0[l+1]);
S te[j] += dy1[j] * vall(Sl0[l]);
T pe[j] -= dy1[j] * vall(Tl0[l]);
}
al+=4; l+=2;
}
if (l==llim) {
for (int j=0; j<NWAY; ++j) {
V dy0[j] = vall(al[1])*(cost[j]*dy1[j] + y1[j]*sint[j]) + vall(al[0])*dy0[j];
y0[j] = vall(al[1])*cost[j]*y1[j] + vall(al[0])*y0[j];
}
for (int j=0; j<NWAY; ++j) {
Q re[j] += y0[j] * vall(Ql0[l]);
S to[j] += dy0[j] * vall(Sl0[l-1]);
T po[j] -= dy0[j] * vall(Tl0[l-1]);
}
}
#if _GCC_VEC_
for (int j=0; j<NWAY; ++j) {
Q S2D_STORE(BrF, j+k, re[j], ro[j])
S S2D_STORE(BtF, j+k, te[j], to[j])
T S2D_STORE(BpF, j+k, pe[j], po[j])
}
#else
for (int j=0; j<NWAY; ++j) {
Q BR0(k+j) = (re[j]+ro[j]);
Q BR0(NLAT-k-1-j) = (re[j]-ro[j]);
S BT0(k+j) = (te[j]+to[j]);
S BT0(NLAT-k-1-j) = (te[j]-to[j]);
T BP0(k+j) = (pe[j]+po[j]);
T BP0(NLAT-k-1-j) = (pe[j]-po[j]);
}
#endif
k+=NWAY;
} while (k < nk);
m0=mstep;
}
#ifndef SHT_AXISYM
#if _GCC_VEC_
Q BrF += m0*NLAT_2;
V BtF += m0*NLAT_2; BpF += m0*NLAT_2;
#else
Q BrF += m0*NLAT;
V BtF += m0*NLAT; BpF += m0*NLAT;
#endif
for (im=m0; im<imlim; im+=mstep) {
m = im*MRES;
//l = LiM(shtns, 0,im);
l = (im*(2*(LMAX+1)-(m+MRES)))>>1;
V m_1 = 1.0/m;
//alm = shtns->alm[im];
alm = shtns->alm + im*(2*LMAX -m+MRES);
Q cplx* Ql = &Qlm[l]; // virtual pointer for l=0 and im
S cplx* Sl = &Slm[l]; // virtual pointer for l=0 and im
T cplx* Tl = &Tlm[l];
k=0; l=shtns->tm[im];
#if _GCC_VEC_
l>>=1; // stay on a 16 byte boundary
while (k<l) { // polar optimization
Q BrF[k] = vdup(0.0); BrF[(NPHI-2*im)*NLAT_2 + k] = vdup(0.0);
Q BrF[NLAT_2-l+k] = vdup(0.0); BrF[(NPHI+1-2*im)*NLAT_2 -l+k] = vdup(0.0);
V BtF[k] = vdup(0.0); BtF[(NPHI-2*im)*NLAT_2 + k] = vdup(0.0);
V BtF[NLAT_2-l+k] = vdup(0.0); BtF[(NPHI+1-2*im)*NLAT_2 -l+k] = vdup(0.0);
V BpF[k] = vdup(0.0); BpF[(NPHI-2*im)*NLAT_2 + k] = vdup(0.0);
V BpF[NLAT_2-l+k] = vdup(0.0); BpF[(NPHI+1-2*im)*NLAT_2 -l+k] = vdup(0.0);
++k;
}
k = ((unsigned) k) / (VSIZE2/2);
#else
while (k<l) { // polar optimization
Q BrF[k] = 0.0; BrF[NLAT-l+k] = 0.0;
V BtF[k] = 0.0; BtF[NLAT-l+k] = 0.0;
V BpF[k] = 0.0; BpF[NLAT-l+k] = 0.0;
++k;
}
#endif
do {
al = alm;
rnd cost[NWAY], y0[NWAY], y1[NWAY];
V rnd st2[NWAY], dy0[NWAY], dy1[NWAY];
Q rnd rer[NWAY], rei[NWAY], ror[NWAY], roi[NWAY];
V rnd ter[NWAY], tei[NWAY], tor[NWAY], toi[NWAY];
V rnd per[NWAY], pei[NWAY], por[NWAY], poi[NWAY];
for (int j=0; j<NWAY; ++j) {
cost[j] = vread(st, k+j);
y0[j] = vall(1.0);
V st2[j] = cost[j]*cost[j]*vall(-m_1);
V y0[j] = vall(m); // for the vector transform, compute ylm*m/sint
}
Q l=m;
V l=m-1;
long int ny = 0;
if ((int)llim <= SHT_L_RESCALE_FLY) {
do { // sin(theta)^m
if (l&1) for (int j=0; j<NWAY; ++j) y0[j] *= cost[j];
for (int j=0; j<NWAY; ++j) cost[j] *= cost[j];
} while(l >>= 1);
} else {
long int nsint = 0;
do { // sin(theta)^m (use rescaling to avoid underflow)
if (l&1) {
for (int j=0; j<NWAY; ++j) y0[j] *= cost[j];
ny += nsint;
if (vlo(y0[0]) < (SHT_ACCURACY+1.0/SHT_SCALE_FACTOR)) {
ny--;
for (int j=0; j<NWAY; ++j) y0[j] *= vall(SHT_SCALE_FACTOR);
}
}
for (int j=0; j<NWAY; ++j) cost[j] *= cost[j];
nsint += nsint;
if (vlo(cost[0]) < 1.0/SHT_SCALE_FACTOR) {
nsint--;
for (int j=0; j<NWAY; ++j) cost[j] *= vall(SHT_SCALE_FACTOR);
}
} while(l >>= 1);
}
for (int j=0; j<NWAY; ++j) {
y0[j] *= vall(al[0]);
cost[j] = vread(ct, j+k);
V dy0[j] = cost[j]*y0[j];
Q ror[j] = vall(0.0); roi[j] = vall(0.0);
Q rer[j] = vall(0.0); rei[j] = vall(0.0);
}
for (int j=0; j<NWAY; ++j) {
y1[j] = (vall(al[1])*y0[j]) *cost[j]; // y1[j] = vall(al[1])*cost[j]*y0[j];
V por[j] = vall(0.0); tei[j] = vall(0.0);
V tor[j] = vall(0.0); pei[j] = vall(0.0);
V dy1[j] = (vall(al[1])*y0[j]) *(cost[j]*cost[j] + st2[j]); // dy1[j] = vall(al[1])*(cost[j]*dy0[j] - y0[j]*st2[j]);
V poi[j] = vall(0.0); ter[j] = vall(0.0);
V toi[j] = vall(0.0); per[j] = vall(0.0);
}
l=m; al+=2;
while ((ny<0) && (l<llim)) { // ylm treated as zero and ignored if ny < 0
for (int j=0; j<NWAY; ++j) {
y0[j] = vall(al[1])*(cost[j]*y1[j]) + vall(al[0])*y0[j];
V dy0[j] = vall(al[1])*(cost[j]*dy1[j] + y1[j]*st2[j]) + vall(al[0])*dy0[j];
}
for (int j=0; j<NWAY; ++j) {
y1[j] = vall(al[3])*(cost[j]*y0[j]) + vall(al[2])*y1[j];
V dy1[j] = vall(al[3])*(cost[j]*dy0[j] + y0[j]*st2[j]) + vall(al[2])*dy1[j];
}
l+=2; al+=4;
if (fabs(vlo(y0[NWAY-1])) > SHT_ACCURACY*SHT_SCALE_FACTOR + 1.0) { // rescale when value is significant
++ny;
for (int j=0; j<NWAY; ++j) {
y0[j] *= vall(1.0/SHT_SCALE_FACTOR); y1[j] *= vall(1.0/SHT_SCALE_FACTOR);
V dy0[j] *= vall(1.0/SHT_SCALE_FACTOR); dy1[j] *= vall(1.0/SHT_SCALE_FACTOR);
}
}
}
if (ny == 0) {
while (l<llim) { // compute even and odd parts
Q for (int j=0; j<NWAY; ++j) { rer[j] += y0[j] * qr(l); rei[j] += y0[j] * qi(l); }
Q for (int j=0; j<NWAY; ++j) { ror[j] += y1[j] * qr(l+1); roi[j] += y1[j] * qi(l+1); }
#ifdef SHT_GRAD
S for (int j=0; j<NWAY; ++j) { tor[j] += dy0[j] * sr(l); pei[j] += y0[j] * sr(l); }
S for (int j=0; j<NWAY; ++j) { toi[j] += dy0[j] * si(l); per[j] -= y0[j] * si(l); }
T for (int j=0; j<NWAY; ++j) { por[j] -= dy0[j] * tr(l); tei[j] += y0[j] * tr(l); }
T for (int j=0; j<NWAY; ++j) { poi[j] -= dy0[j] * ti(l); ter[j] -= y0[j] * ti(l); }
S for (int j=0; j<NWAY; ++j) { ter[j] += dy1[j] * sr(l+1); poi[j] += y1[j] * sr(l+1); }
S for (int j=0; j<NWAY; ++j) { tei[j] += dy1[j] * si(l+1); por[j] -= y1[j] * si(l+1); }
T for (int j=0; j<NWAY; ++j) { per[j] -= dy1[j] * tr(l+1); toi[j] += y1[j] * tr(l+1); }
T for (int j=0; j<NWAY; ++j) { pei[j] -= dy1[j] * ti(l+1); tor[j] -= y1[j] * ti(l+1); }
#else
V for (int j=0; j<NWAY; ++j) {
V tor[j] += dy0[j] * sr(l) - y1[j] * ti(l+1);
V pei[j] += y0[j] * sr(l) - dy1[j] * ti(l+1);
V }
V for (int j=0; j<NWAY; ++j) {
V poi[j] -= dy0[j] * ti(l) - y1[j] * sr(l+1);
V ter[j] -= y0[j] * ti(l) - dy1[j] * sr(l+1);
V }
V for (int j=0; j<NWAY; ++j) {
V toi[j] += dy0[j] * si(l) + y1[j] * tr(l+1);
V per[j] -= y0[j] * si(l) + dy1[j] * tr(l+1);
V }
V for (int j=0; j<NWAY; ++j) {
V por[j] -= dy0[j] * tr(l) + y1[j] * si(l+1);
V tei[j] += y0[j] * tr(l) + dy1[j] * si(l+1);
V }
#endif
for (int j=0; j<NWAY; ++j) {
V dy0[j] = vall(al[1])*(cost[j]*dy1[j] + y1[j]*st2[j]) + vall(al[0])*dy0[j];
y0[j] = vall(al[1])*(cost[j]*y1[j]) + vall(al[0])*y0[j];
}
for (int j=0; j<NWAY; ++j) {
V dy1[j] = vall(al[3])*(cost[j]*dy0[j] + y0[j]*st2[j]) + vall(al[2])*dy1[j];
y1[j] = vall(al[3])*(cost[j]*y0[j]) + vall(al[2])*y1[j];
}
l+=2; al+=4;
}
if (l==llim) {
Q for (int j=0; j<NWAY; ++j) { rer[j] += y0[j] * qr(l); rei[j] += y0[j] * qi(l); }
S for (int j=0; j<NWAY; ++j) { tor[j] += dy0[j] * sr(l); pei[j] += y0[j] * sr(l); }
S for (int j=0; j<NWAY; ++j) { toi[j] += dy0[j] * si(l); per[j] -= y0[j] * si(l); }
T for (int j=0; j<NWAY; ++j) { por[j] -= dy0[j] * tr(l); tei[j] += y0[j] * tr(l); }
T for (int j=0; j<NWAY; ++j) { poi[j] -= dy0[j] * ti(l); ter[j] -= y0[j] * ti(l); }
}
3 for (int j=0; j<NWAY; ++j) cost[j] = vread(st, k+j) * vall(m_1);
3 for (int j=0; j<NWAY; ++j) { rer[j] *= cost[j]; ror[j] *= cost[j]; rei[j] *= cost[j]; roi[j] *= cost[j]; }
}
#if _GCC_VEC_
for (int j=0; j<NWAY; ++j) {
Q S2D_CSTORE(BrF, k+j, rer[j], ror[j], rei[j], roi[j])
V S2D_CSTORE(BtF, k+j, ter[j], tor[j], tei[j], toi[j])
V S2D_CSTORE(BpF, k+j, per[j], por[j], pei[j], poi[j])
}
#else
for (int j=0; j<NWAY; ++j) {
Q BrF[k+j] = (rer[j]+ror[j]) + I*(rei[j]+roi[j]);
Q BrF[NLAT-k-1-j] = (rer[j]-ror[j]) + I*(rei[j]-roi[j]);
V BtF[k+j] = (ter[j]+tor[j]) + I*(tei[j]+toi[j]);
V BtF[NLAT-1-k-j] = (ter[j]-tor[j]) + I*(tei[j]-toi[j]);
V BpF[k+j] = (per[j]+por[j]) + I*(pei[j]+poi[j]);
V BpF[NLAT-1-k-j] = (per[j]-por[j]) + I*(pei[j]-poi[j]);
}
#endif
k+=NWAY;
} while (k < nk);
#if _GCC_VEC_
Q BrF += mstep*NLAT_2;
V BtF += mstep*NLAT_2; BpF += mstep*NLAT_2;
#else
Q BrF += mstep*NLAT;
V BtF += mstep*NLAT; BpF += mstep*NLAT;
#endif
}
#if _GCC_VEC_
while(im <= NPHI-imlim) { // padding for high m's
k=0;
do {
Q BrF[k] = vdup(0.0);
V BtF[k] = vdup(0.0); BpF[k] = vdup(0.0);
} while (++k < NLAT_2);
Q BrF += mstep*NLAT_2;
V BtF += mstep*NLAT_2; BpF += mstep*NLAT_2;
im+=mstep;
}
#else
while(im <= NPHI/2) { // padding for high m's
k=0;
do {
Q BrF[k] = 0.0;
V BtF[k] = 0.0; BpF[k] = 0.0;
} while (++k < NLAT);
Q BrF += mstep*NLAT;
V BtF += mstep*NLAT; BpF += mstep*NLAT;
im+=mstep;
}
#endif
#endif
}
Q #undef BR0
V #undef BT0
V #undef BP0
Q #undef qr
Q #undef qi
S #undef sr
S #undef si
T #undef tr
T #undef ti
// OpenMP front-end for the spherical-harmonic synthesis (spectral -> spatial)
// transforms. NOTE(review): this is SHTns meta-source, not plain C — the
// leading tokens Q/S/T/V/3/QX/VX on each line appear to be variant tags that
// the SHTns generator keeps or strips to produce the scalar (Q*), spheroidal
// (S), toroidal (T), vector (V*) and combined 3-component (3) versions of this
// routine; confirm against the SHTns build scripts. Untagged lines (like these
// comments and the #if/#endif structure) are common to every variant.
3	static void GEN3(SHqst_to_spat_omp,NWAY,SUFFIX)(shtns_cfg shtns, cplx *Qlm, cplx *Slm, cplx *Tlm, double *Vr, double *Vt, double *Vp, long int llim) {
QX	static void GEN3(SH_to_spat_omp,NWAY,SUFFIX)(shtns_cfg shtns, cplx *Qlm, double *Vr, long int llim) {
  #ifndef SHT_GRAD
VX	static void GEN3(SHsphtor_to_spat_omp,NWAY,SUFFIX)(shtns_cfg shtns, cplx *Slm, cplx *Tlm, double *Vt, double *Vp, long int llim) {
  #else
S	static void GEN3(SHsph_to_spat_omp,NWAY,SUFFIX)(shtns_cfg shtns, cplx *Slm, double *Vt, double *Vp, long int llim) {
T	static void GEN3(SHtor_to_spat_omp,NWAY,SUFFIX)(shtns_cfg shtns, cplx *Tlm, double *Vt, double *Vp, long int llim) {
  #endif
	int k;
	unsigned imlim = 0;
Q	v2d* BrF = (v2d*) Vr;
V	v2d* BtF = (v2d*) Vt;	v2d* BpF = (v2d*) Vp;
  #ifndef SHT_AXISYM
	// imlim: highest azimuthal order (m/MRES) actually synthesized.
	imlim = MTR;
	#ifdef SHT_VAR_LTR
		if (imlim*MRES > (unsigned) llim) imlim = ((unsigned) llim)/MRES;	// 32bit mul and div should be faster
	#endif
	#ifdef _GCC_VEC_
	if (shtns->fftc_mode > 0) {		// alloc memory for the FFT
		// fftc_mode > 0: the complex FFT cannot run in-place on the output
		// arrays, so scratch buffers are allocated (one per component).
		unsigned long nv = shtns->nspat;
QX		BrF = (v2d*) VMALLOC( nv * sizeof(double) );
VX		BtF = (v2d*) VMALLOC( 2*nv * sizeof(double) );
VX		BpF = BtF + nv/2;
3		BrF = (v2d*) VMALLOC( 3*nv * sizeof(double) );
3		BtF = BrF + nv/2;		BpF = BrF + nv;
	}
	#else
	if (shtns->ncplx_fft > 0) {		// alloc memory for the FFT
QX		BrF = VMALLOC( shtns->ncplx_fft * sizeof(cplx) );
VX		BtF = VMALLOC( 2* shtns->ncplx_fft * sizeof(cplx) );
VX		BpF = BtF + shtns->ncplx_fft;
3		BrF = VMALLOC( 3* shtns->ncplx_fft * sizeof(cplx) );
3		BtF = BrF + shtns->ncplx_fft;	BpF = BtF + shtns->ncplx_fft;
	}
	#endif
  #endif
	imlim += 1;
	// Each thread runs the matching Legendre-synthesis kernel (_sy*), which
	// partitions the work over m internally (it receives imlim).
	#pragma omp parallel num_threads(shtns->nthreads)
	{
3		GEN3(_sy3,NWAY,SUFFIX)(shtns, Qlm, Slm, Tlm, BrF, BtF, BpF, llim, imlim);
QX		GEN3(_sy1,NWAY,SUFFIX)(shtns, Qlm, BrF, llim, imlim);
	#ifndef SHT_GRAD
VX		GEN3(_sy2,NWAY,SUFFIX)(shtns, Slm, Tlm, BtF, BpF, llim, imlim);
	#else
S		GEN3(_sy1s,NWAY,SUFFIX)(shtns, Slm, BtF, BpF, llim, imlim);
T		GEN3(_sy1t,NWAY,SUFFIX)(shtns, Tlm, BtF, BpF, llim, imlim);
	#endif
  #ifndef SHT_AXISYM
	// Without an OpenMP-aware FFTW, the inverse FFTs are issued here, inside
	// the parallel region: one single-threaded FFT per component, distributed
	// across threads with "omp single nowait".
V	#ifndef HAVE_LIBFFTW3_OMP
V		#pragma omp barrier
V		#if _GCC_VEC_
V		if (shtns->fftc_mode == 0) {
3			#pragma omp single nowait
3			fftw_execute_dft(shtns->ifftc, (cplx *) BrF, (cplx *) Vr);
V			#pragma omp single nowait
V			fftw_execute_dft(shtns->ifftc, (cplx *) BtF, (cplx *) Vt);
V			#pragma omp single nowait
V			fftw_execute_dft(shtns->ifftc, (cplx *) BpF, (cplx *) Vp);
V		} else if (shtns->fftc_mode > 0) {		// split dft
3			#pragma omp single nowait
3			fftw_execute_split_dft(shtns->ifftc,((double*)BrF)+1, ((double*)BrF), Vr+NPHI, Vr);
V			#pragma omp single nowait
V			fftw_execute_split_dft(shtns->ifftc,((double*)BtF)+1, ((double*)BtF), Vt+NPHI, Vt);
V			#pragma omp single nowait
V			fftw_execute_split_dft(shtns->ifftc,((double*)BpF)+1, ((double*)BpF), Vp+NPHI, Vp);
V		}
V		#else
3			#pragma omp single nowait
3			fftw_execute_dft_c2r(shtns->ifft, (cplx *) BrF, Vr);
V			#pragma omp single nowait
V			fftw_execute_dft_c2r(shtns->ifft, (cplx *) BtF, Vt);
V			#pragma omp single nowait
V			fftw_execute_dft_c2r(shtns->ifft, (cplx *) BpF, Vp);
V		#endif
V	#endif
  #endif
	}
  #ifndef SHT_AXISYM
	// NPHI > 1 as SHT_AXISYM is not defined.
	// With an OpenMP-aware FFTW (HAVE_LIBFFTW3_OMP), the multi-threaded
	// inverse FFTs are executed once here, after the parallel region.
	#if _GCC_VEC_
	if (shtns->fftc_mode >= 0) {
		if (shtns->fftc_mode == 0) {
V	#ifdef HAVE_LIBFFTW3_OMP
Q			fftw_execute_dft(shtns->ifftc, (cplx *) BrF, (cplx *) Vr);
V			fftw_execute_dft(shtns->ifftc, (cplx *) BtF, (cplx *) Vt);
V			fftw_execute_dft(shtns->ifftc, (cplx *) BpF, (cplx *) Vp);
V	#endif
		} else {		// split dft
V	#ifdef HAVE_LIBFFTW3_OMP
Q			fftw_execute_split_dft(shtns->ifftc,((double*)BrF)+1, ((double*)BrF), Vr+NPHI, Vr);
V			fftw_execute_split_dft(shtns->ifftc,((double*)BtF)+1, ((double*)BtF), Vt+NPHI, Vt);
V			fftw_execute_split_dft(shtns->ifftc,((double*)BpF)+1, ((double*)BpF), Vp+NPHI, Vp);
V	#endif
			// scratch buffers were only allocated in split-dft mode; release them.
Q			VFREE(BrF);
VX			VFREE(BtF);	// this frees also BpF.
		}
	}
	#else
	if (shtns->ncplx_fft >= 0) {
V	#ifdef HAVE_LIBFFTW3_OMP
Q		fftw_execute_dft_c2r(shtns->ifft, (cplx *) BrF, Vr);
V		fftw_execute_dft_c2r(shtns->ifft, (cplx *) BtF, Vt);
V		fftw_execute_dft_c2r(shtns->ifft, (cplx *) BpF, Vp);
V	#endif
		if (shtns->ncplx_fft > 0) {		// free memory
Q			VFREE(BrF);
VX			VFREE(BtF);	// this frees also BpF.
		}
	}
	#endif
  #endif
  }
|
reduction_minus_2.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
int main()
{
int result = 100;
// reduction(-:result): per the OpenMP spec, each thread gets a private copy
// initialized to the identity 0, and the partial results are combined with
// addition — the '-' reduction behaves exactly like '+' (it is deprecated in
// OpenMP 5.2 for this reason). There is no data race here.
#pragma omp parallel reduction(-:result)
{
// each thread subtracts its own id from its private copy
result -= omp_get_thread_num();
}
// Final value: 100 - (0 + 1 + ... + (nthreads-1)).
printf("Result: %d\n", result);
}
|
GB_binop__ne_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__ne_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__ne_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__ne_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__ne_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__ne_int8)
// A*D function (colscale): GB (_AxD__ne_int8)
// D*A function (rowscale): GB (_DxB__ne_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__ne_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__ne_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ne_int8)
// C=scalar+B GB (_bind1st__ne_int8)
// C=scalar+B' GB (_bind1st_tran__ne_int8)
// C=A+scalar GB (_bind2nd__ne_int8)
// C=A'+scalar GB (_bind2nd_tran__ne_int8)
// C type: bool
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = (aij != bij)
// Type and operator plumbing consumed by the shared templates included below:
// for z = (x != y), C is bool while A and B are int8_t.
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x != y) ;
// true if the binop must be flipped
// (NE is commutative, so no flipped variant is needed)
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_NE || GxB_NO_INT8 || GxB_NO_NE_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Disabled (#if 0) for this operator: the dense-accum kernel only exists for
// the operators listed below, and NE is not one of them.
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; the loop comes from the shared
// template, specialized via GB_BINOP to cij = (aij != bij).
GrB_Info GB (_Cdense_ewise3_noaccum__ne_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// operator compiled out (see GB_DISABLE); caller falls back to generic code
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B (accumulate sparse B into dense C). The body is disabled with #if 0
// for this operator — presumably because NE is not a supported accumulator
// (the dense-accum kernels exist only for MIN/MAX/PLUS/... ops); the function
// is kept so the generated interface stays uniform, and it reports success
// without doing any work.
GrB_Info GB (_Cdense_accumB__ne_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b (accumulate a scalar into dense C). As with _Cdense_accumB above,
// the body is disabled (#if 0) for the NE operator; the stub returns success.
GrB_Info GB (_Cdense_accumb__ne_int8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale each column j of A by the diagonal entry D(j,j),
// with cij = (aij != djj).
GrB_Info GB (_AxD__ne_int8)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed alias of C's value array, used by the template via GB_CX
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale each row i of B by the diagonal entry D(i,i),
// with cij = (dii != bij).
GrB_Info GB (_DxB__ne_int8)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed alias of C's value array, used by the template via GB_CX
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd C = A+B (optionally masked): the pattern of C is the set union of
// A and B; where both are present, cij = (aij != bij).
GrB_Info GB (_AaddB__ne_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace for slicing M, A, and B; released by GB_FREE_WORK below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult C = A.*B (optionally masked) where C is sparse or hypersparse:
// the pattern of C is the set intersection of A and B.
GrB_Info GB (_AemultB_08__ne_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
GrB_Info GB (_AemultB_02__ne_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// GB_BINOP_FLIP is 0 for NE (it is commutative), so only the #else branch
// below is compiled for this file; the flipxy argument can be ignored.
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<M> = A.*B when M is sparse/hyper and both A and B are
// bitmap/full: iterate over the entries of the mask M.
GrB_Info GB (_AemultB_04__ne_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult C = A.*B (optionally masked) where the result C is bitmap.
GrB_Info GB (_AemultB_bitmap__ne_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x != Bx [p]) for every entry present in B (bind the scalar x as
// the first operand). Bb is B's bitmap (NULL unless B is bitmap); entries
// absent from the bitmap are left untouched in Cx.
GrB_Info GB (_bind1st__ne_int8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cz = (bool *) Cx_output ;
int8_t *Bx = (int8_t *) Bx_input ;
int8_t xval = (*((int8_t *) x_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// only touch positions that hold an entry of B
if (GBB (Bb, p))
{
int8_t bij = GBX (Bx, p, false) ;
Cz [p] = (xval != bij) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] != y) for every entry present in A (bind the scalar y as
// the second operand). Ab is A's bitmap (NULL unless A is bitmap); positions
// absent from the bitmap are left untouched in Cx.
GrB_Info GB (_bind2nd__ne_int8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cz = (bool *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t yval = (*((int8_t *) y_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// only touch positions that hold an entry of A
if (GBB (Ab, p))
{
int8_t aij = GBX (Ax, p, false) ;
Cz [p] = (aij != yval) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is the per-entry kernel applied by GB_unop_transpose.c:
// cij = (x != aij), with the scalar x bound as the first operand.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x != aij) ; \
}
// C = op (x, A'): transpose A and apply the NE operator with scalar x first.
GrB_Info GB (_bind1st_tran__ne_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code after this function
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is the per-entry kernel applied by GB_unop_transpose.c:
// cij = (aij != y), with the scalar y bound as the second operand.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij != y) ; \
}
// C = op (A', y): transpose A and apply the NE operator with scalar y second.
GrB_Info GB (_bind2nd_tran__ne_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
sor.c | #include "sor.h"
#include <math.h>
#include "boundary_val.h"
#include "helper.h"
#include <omp.h>
#define SOR_ITER_MAX 3
/* Successive over-relaxation (SOR) pressure solver for the 3-D grid.
 * Performs SOR_ITER_MAX sweeps over the precomputed list of fluid cells PP1
 * (length FluidCells), updating P in place with relaxation factor omg, and
 * on the final sweep computes the L2-norm residual of the pressure equation
 * into *res. Boundary values are re-applied after every sweep.
 * Returns the number of sweeps performed (SOR_ITER_MAX).
 * NOTE(review): the parallel update loop reads neighbor values of P that
 * other threads may be updating concurrently, i.e. a "chaotic" relaxation
 * ordering rather than a strict Gauss-Seidel sweep — confirm this is the
 * intended parallelization. */
int sor(
double omg,
double dx,
double dy,
double dz,
int imax,
int jmax,
int kmax,
double ***P,
double ***RS,
double *res,
int ***Flag,
struct p_pointer *PP1,
int FluidCells
/*double presLeft,
double presRight*/
) {
int i,j,k;
int s;
double rloc;
/* SOR update coefficient: omg / (2*(1/dx^2 + 1/dy^2 + 1/dz^2)) */
double coeff = omg/(2.0*(1.0/(dx*dx)+1.0/(dy*dy)+1.0/(dz*dz)));
double tmp = 0;
/* precomputed inverse squared grid spacings */
double dx2 = 1.0/(dx*dx);
double dy2 = 1.0/(dy*dy);
double dz2 = 1.0/(dz*dz);
double omg2 = 1-omg;
int sor_iter = 0;
/* SOR iteration */
for(sor_iter = 1; sor_iter <= SOR_ITER_MAX; sor_iter++) {
#pragma omp parallel for private(i,j,k)
for(s=0;s<FluidCells;s++){
struct p_pointer *pp = &PP1[s];
if(pp->p){
i = pp->i;
j = pp->j;
k = pp->k;
P[i][j][k] = omg2*P[i][j][k]+
coeff*(
(P[i+1][j ][k ]+P[i-1][j ][k ])*dx2 +
(P[i ][j+1][k ]+P[i ][j-1][k ])*dy2 +
(P[i ][j ][k+1]+P[i ][j ][k-1])*dz2
- RS[i ][j ][k ]
);
}
}
/* compute the residual on the last sweep only */
if(sor_iter==SOR_ITER_MAX){
rloc = 0;
/* BUG FIX: rloc was accumulated by all threads without synchronization
 * (a data race that silently corrupted the residual); use an OpenMP
 * reduction so each thread sums privately and the partials are combined. */
#pragma omp parallel for private(i,j,k,tmp) reduction(+:rloc)
for(s=0;s<FluidCells;s++){
struct p_pointer *pp = &PP1[s];
i = pp->i;
j = pp->j;
k = pp->k;
if(isfluid(Flag[i][j][k]) && !emptyneighbor(Flag[i][j][k])){
/* squared residual of the discrete Poisson equation at this cell */
tmp = (P[i+1][j][k]-2.0*P[i][j][k]+P[i-1][j][k])*dx2 + (P[i][j+1][k]-2.0*P[i][j][k]+P[i][j-1][k])*dy2 + (P[i][j][k+1]-2.0*P[i][j][k]+P[i][j][k-1])*dz2 - RS[i][j][k];
tmp = tmp*tmp;
rloc += tmp;
}
}
rloc = rloc/FluidCells;
rloc = sqrt(rloc);
/* set residual */
*res = rloc;
}
boundaryvalues_pressure(P,Flag,imax,jmax,kmax);
}
return SOR_ITER_MAX;
//check if given pressure, set Dirichlet or Neuman BC according to that, for left and right wall
/*for (j=0; j<=jmax+1; j++) {
if((Flag[0][j] & 32) != 0) {//pressure given -> overwrite with Dirichlet BC
P[0][j] = presLeft*2.0 - P[1][j];
}
if((Flag[imax+1][j] & 32) != 0) {//pressure given -> overwrite with Dirichlet BC
P[imax+1][j] = 2.0*presRight - P[imax][j];
}
}*/
}
|
GB_reduce_panel.c | //------------------------------------------------------------------------------
// GB_reduce_panel: s=reduce(A), reduce a matrix to a scalar
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Reduce a matrix to a scalar using a panel-based method for built-in
// operators. No typecasting is performed. A must be sparse, hypersparse,
// or full (it cannot be bitmap). A cannot have any zombies. If A has zombies
// or is bitmap, GB_reduce_to_scalar_template is used instead.
{
//--------------------------------------------------------------------------
// get A
//--------------------------------------------------------------------------
const GB_ATYPE *restrict Ax = (GB_ATYPE *) A->x ;
int64_t anz = GB_NNZ (A) ;
ASSERT (anz > 0) ;
ASSERT (!GB_IS_BITMAP (A)) ;
ASSERT (A->nzombies == 0) ;
#if GB_IS_ANY_MONOID
// the ANY monoid can take any entry, and terminate immediately
s = Ax [anz-1] ;
#else
//--------------------------------------------------------------------------
// reduce A to a scalar
//--------------------------------------------------------------------------
if (nthreads == 1)
{
//----------------------------------------------------------------------
// load the Panel with the first entries
//----------------------------------------------------------------------
// Panel holds GB_PANEL independent partial accumulators, reduced to a
// single scalar at the end; keeping several accumulators live breaks
// the loop-carried dependence (presumably to enable vectorization of
// the inner loops — confirm with the GB_PANEL definition).
GB_ATYPE Panel [GB_PANEL] ;
int64_t first_panel_size = GB_IMIN (GB_PANEL, anz) ;
for (int64_t k = 0 ; k < first_panel_size ; k++)
{
Panel [k] = Ax [k] ;
}
#if GB_HAS_TERMINAL
int panel_count = 0 ;
#endif
//----------------------------------------------------------------------
// reduce all entries to the Panel
//----------------------------------------------------------------------
for (int64_t p = GB_PANEL ; p < anz ; p += GB_PANEL)
{
if (p + GB_PANEL > anz)
{
// last partial panel
for (int64_t k = 0 ; k < anz-p ; k++)
{
// Panel [k] = op (Panel [k], Ax [p+k]) ;
GB_ADD_ARRAY_TO_ARRAY (Panel, k, Ax, p+k) ;
}
}
else
{
// whole panel
for (int64_t k = 0 ; k < GB_PANEL ; k++)
{
// Panel [k] = op (Panel [k], Ax [p+k]) ;
GB_ADD_ARRAY_TO_ARRAY (Panel, k, Ax, p+k) ;
}
#if GB_HAS_TERMINAL
panel_count-- ;
if (panel_count <= 0)
{
// check for early exit only every 256 panels
panel_count = 256 ;
int count = 0 ;
for (int64_t k = 0 ; k < GB_PANEL ; k++)
{
count += (Panel [k] == GB_TERMINAL_VALUE) ;
}
if (count > 0)
{
// some accumulator reached the monoid's terminal value;
// the result can no longer change, so stop scanning
break ;
}
}
#endif
}
}
//----------------------------------------------------------------------
// s = reduce (Panel)
//----------------------------------------------------------------------
s = Panel [0] ;
for (int64_t k = 1 ; k < first_panel_size ; k++)
{
// s = op (s, Panel [k]) ;
GB_ADD_ARRAY_TO_SCALAR (s, Panel, k) ;
}
}
else
{
//----------------------------------------------------------------------
// all tasks share a single early_exit flag
//----------------------------------------------------------------------
// If this flag gets set, all tasks can terminate early
#if GB_HAS_TERMINAL
bool early_exit = false ;
#endif
//----------------------------------------------------------------------
// each thread reduces its own slice in parallel
//----------------------------------------------------------------------
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (tid = 0 ; tid < ntasks ; tid++)
{
//------------------------------------------------------------------
// determine the work for this task
//------------------------------------------------------------------
// Task tid reduces Ax [pstart:pend-1] to the scalar W [tid]
int64_t pstart, pend ;
GB_PARTITION (pstart, pend, anz, tid, ntasks) ;
GB_ATYPE t = Ax [pstart] ;
//------------------------------------------------------------------
// skip this task if the terminal value has already been reached
//------------------------------------------------------------------
#if GB_HAS_TERMINAL
// check if another task has called for an early exit
bool my_exit ;
GB_ATOMIC_READ
my_exit = early_exit ;
if (!my_exit)
#endif
//------------------------------------------------------------------
// do the reductions for this task
//------------------------------------------------------------------
{
//--------------------------------------------------------------
// load the Panel with the first entries
//--------------------------------------------------------------
// same panel scheme as the single-thread branch above, applied
// to this task's slice Ax [pstart:pend-1]
GB_ATYPE Panel [GB_PANEL] ;
int64_t my_anz = pend - pstart ;
int64_t first_panel_size = GB_IMIN (GB_PANEL, my_anz) ;
for (int64_t k = 0 ; k < first_panel_size ; k++)
{
Panel [k] = Ax [pstart + k] ;
}
#if GB_HAS_TERMINAL
int panel_count = 0 ;
#endif
//--------------------------------------------------------------
// reduce all entries to the Panel
//--------------------------------------------------------------
for (int64_t p = pstart + GB_PANEL ; p < pend ; p += GB_PANEL)
{
if (p + GB_PANEL > pend)
{
// last partial panel
for (int64_t k = 0 ; k < pend-p ; k++)
{
// Panel [k] = op (Panel [k], Ax [p+k]) ;
GB_ADD_ARRAY_TO_ARRAY (Panel, k, Ax, p+k) ;
}
}
else
{
// whole panel
for (int64_t k = 0 ; k < GB_PANEL ; k++)
{
// Panel [k] = op (Panel [k], Ax [p+k]) ;
GB_ADD_ARRAY_TO_ARRAY (Panel, k, Ax, p+k) ;
}
#if GB_HAS_TERMINAL
panel_count-- ;
if (panel_count <= 0)
{
// check for early exit only every 256 panels
panel_count = 256 ;
int count = 0 ;
for (int64_t k = 0 ; k < GB_PANEL ; k++)
{
count += (Panel [k] == GB_TERMINAL_VALUE) ;
}
if (count > 0)
{
break ;
}
}
#endif
}
}
//--------------------------------------------------------------
// t = reduce (Panel)
//--------------------------------------------------------------
t = Panel [0] ;
for (int64_t k = 1 ; k < first_panel_size ; k++)
{
// t = op (t, Panel [k]) ;
GB_ADD_ARRAY_TO_SCALAR (t, Panel, k) ;
}
#if GB_HAS_TERMINAL
if (t == GB_TERMINAL_VALUE)
{
// tell all other tasks to exit early
GB_ATOMIC_WRITE
early_exit = true ;
}
#endif
}
//------------------------------------------------------------------
// save the results of this task
//------------------------------------------------------------------
// per-task partial result; combined sequentially below
W [tid] = t ;
}
//----------------------------------------------------------------------
// sum up the results of each slice using a single thread
//----------------------------------------------------------------------
s = W [0] ;
for (int tid = 1 ; tid < ntasks ; tid++)
{
// s = op (s, W [tid]), no typecast
GB_ADD_ARRAY_TO_SCALAR (s, W, tid) ;
}
}
#endif
}
|
DRB091-threadprivate2-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
A file-scope variable used within a function called by a parallel region.
Use threadprivate to avoid data races.
This is the case for a variable referenced within a construct.
*/
#include "omprace.h"
#include <omp.h>
#include <stdio.h>
#include <assert.h>
int sum0=0, sum1=0;
#pragma omp threadprivate(sum0)
int main()
{
  omprace_init();
  int len=1000;
  int i, sum=0;
  /* sum0 is declared threadprivate above, so each thread gets its own copy;
     copyin initializes every thread's copy from the master's value (0). */
  #pragma omp parallel copyin(sum0)
  {
    /* Each thread accumulates its share of [0,len) into its private sum0. */
    #pragma omp for
    for (i=0;i<len;i++)
    {
      sum0=sum0+i;
    }
    /* Combine the per-thread partial sums; critical serializes the update
       of the shared variable sum. */
    #pragma omp critical
    {
      sum= sum+sum0;
    }
  }
  /* reference calculation */
  for (i=0;i<len;i++)
  {
    sum1=sum1+i;
  }
  printf("sum=%d; sum1=%d\n",sum,sum1);
  assert(sum==sum1);
  omprace_fini();
  return 0;
}
|
cpu_rnnt.h | #pragma once
#include <tuple>
#include <cmath>
#include <cstring>
#include <limits>
#include <algorithm>
#include <numeric>
#include <chrono>
#if !defined(RNNT_DISABLE_OMP) && !defined(APPLE)
#include <omp.h>
#endif
#include "rnnt_helper.h"
// CPU implementation of the RNN-Transducer loss: forward/backward dynamic
// programming over a (time T) x (label position U) lattice, per minibatch
// entry, optionally parallelized across the minibatch with OpenMP.
template<typename ProbT>
class CpuRNNT {
public:
    // Noncopyable
    // minibatch:     number of utterances processed per call
    // maxT, maxU:    maximum time steps / label positions (+1 for blank row)
    // alphabet_size: number of output symbols including blank
    // workspace:     caller-provided scratch buffer (sized externally)
    // blank:         index of the blank symbol
    // num_threads:   OpenMP thread count; <= 0 means use omp_get_max_threads()
    // batch_first:   log-prob layout flag (see CpuRNNT_index::operator())
    CpuRNNT(int minibatch, int maxT, int maxU, int alphabet_size, void* workspace,
            int blank, int num_threads, bool batch_first) :
        minibatch_(minibatch), maxT_(maxT), maxU_(maxU), alphabet_size_(alphabet_size),
        workspace_(workspace), blank_(blank), num_threads_(num_threads), batch_first(batch_first) {
#if defined(RNNT_DISABLE_OMP) || defined(APPLE)
#else
        if (num_threads > 0) {
            omp_set_num_threads(num_threads);
        } else {
            num_threads_ = omp_get_max_threads();
        }
#endif
    };
    CpuRNNT(const CpuRNNT&) = delete;
    CpuRNNT& operator=(const CpuRNNT&) = delete;
    // Computes per-utterance negative log-likelihood costs AND gradients
    // w.r.t. the log probabilities.
    rnntStatus_t cost_and_grad(const ProbT* const log_probs,
                               ProbT* grads,
                               ProbT* costs,
                               const int* const flat_labels,
                               const int* const label_lengths,
                               const int* const input_lengths);
    // Computes costs only (forward pass, no gradients).
    rnntStatus_t score_forward(const ProbT* const log_probs,
                               ProbT* costs,
                               const int* const flat_labels,
                               const int* const label_lengths,
                               const int* const input_lengths);
private:
    // Translates (t, u) / (t, u, v) coordinates into flat offsets for the
    // two supported log-prob memory layouts.
    class CpuRNNT_index {
    public:
        CpuRNNT_index(int U, int maxU, int minibatch, int alphabet_size, bool batch_first);
        int U;
        int maxU;
        int minibatch;
        int alphabet_size;
        bool batch_first;
        int operator()(int t, int u);
        int operator()(int t, int u, int v);
    };
    // Carves alphas/betas/log_probs2 out of the shared workspace for one
    // utterance and caches the blank/label log probs.
    class CpuRNNT_metadata {
    public:
        CpuRNNT_metadata(int T, int U, void* workspace, size_t bytes_used, int blank,
                         const int* const labels, const ProbT* const log_probs, CpuRNNT_index& idx);
        ProbT* alphas;
        ProbT* betas;
        ProbT* log_probs2; // only store blank & label
    private:
        void setup_probs(int T, int U, const int* const labels, int blank,
                         const ProbT* const log_probs, CpuRNNT_index& idx);
    };
    int minibatch_;
    int maxT_;
    int maxU_;
    int alphabet_size_; // Number of characters plus blank
    void* workspace_;
    int blank_;
    int num_threads_;
    bool batch_first;
    // Per-utterance worker: runs alphas, betas and gradient accumulation.
    ProbT cost_and_grad_kernel(const ProbT* const log_probs, ProbT* grad,
                               const int* const labels, int mb,
                               int T, int U, size_t bytes_used);
    ProbT compute_alphas(const ProbT* const log_probs, int T, int U, ProbT* alphas);
    ProbT compute_betas_and_grad(ProbT* grad, const ProbT* const log_probs,
                                 int T, int U, ProbT* alphas, ProbT* betas,
                                 const int* const labels, ProbT logll);
};
// Carves three arrays out of the caller's workspace, starting at bytes_used:
//   alphas     : T*U forward probabilities
//   betas      : T*U backward probabilities
//   log_probs2 : T*U pairs {blank log-prob, label log-prob}
// then fills log_probs2 from the full alphabet-sized log_probs.
template<typename ProbT>
CpuRNNT<ProbT>::CpuRNNT_metadata::CpuRNNT_metadata(int T, int U, void* workspace, size_t bytes_used, int blank,
        const int* const labels, const ProbT* const log_probs, CpuRNNT_index& idx) {
    alphas = reinterpret_cast<ProbT *>(static_cast<char *>(workspace) + bytes_used);
    bytes_used += sizeof(ProbT) * T * U;
    betas = reinterpret_cast<ProbT *>(static_cast<char *>(workspace) + bytes_used);
    bytes_used += sizeof(ProbT) * T * U;
    log_probs2 = reinterpret_cast<ProbT *>(static_cast<char *>(workspace) + bytes_used);
    bytes_used += sizeof(ProbT) * T * U * 2;
    setup_probs(T, U, labels, blank, log_probs, idx);
}
// Caches, for every lattice node (t, u), only the two log probabilities the
// DP recursions need: offset+0 holds the blank, offset+1 the next label.
template<typename ProbT>
void
CpuRNNT<ProbT>::CpuRNNT_metadata::setup_probs(int T, int U, const int* const labels, int blank,
        const ProbT* const log_probs, CpuRNNT_index& idx) {
    for (int t = 0; t < T; ++t) {
        for (int u = 0; u < U; ++u) {
            int offset = (t * U + u) * 2;
            log_probs2[offset] = log_probs[idx(t, u, blank)];
            // labels do not have first blank; at u == U-1 there is no next
            // label, so offset+1 is left unset and must not be read there.
            if (u < U-1) log_probs2[offset + 1] = log_probs[idx(t, u, labels[u])];
        }
    }
}
// Stores the layout parameters used by the two operator() overloads below.
template<typename ProbT>
CpuRNNT<ProbT>::CpuRNNT_index::CpuRNNT_index(int U, int maxU, int minibatch, int alphabet_size, bool batch_first) :
    U(U), maxU(maxU), minibatch(minibatch), alphabet_size(alphabet_size), batch_first(batch_first) {}
// Flat offset of lattice node (t, u) in a row-major T x U array
// (used for alphas/betas/log_probs2, which are sized by the actual U).
template<typename ProbT>
inline int CpuRNNT<ProbT>::CpuRNNT_index::operator()(int t, int u) {
    return t * U + u;
}
// Flat offset of symbol v at node (t, u) in the full log-prob tensor.
// batch_first: layout (T, maxU, alphabet); otherwise the minibatch is an
// extra stride between (t, u) and the alphabet dimension.
template<typename ProbT>
inline int CpuRNNT<ProbT>::CpuRNNT_index::operator()(int t, int u, int v) {
    if (batch_first)
        return (t * maxU + u) * alphabet_size + v;
    return (t * maxU + u) * minibatch * alphabet_size + v;
}
// Processes one utterance: builds the per-utterance metadata, runs the
// forward (alpha) and backward (beta + gradient) passes, sanity-checks that
// both passes agree on the log-likelihood, and returns the cost (-log P).
template<typename ProbT>
ProbT
CpuRNNT<ProbT>::cost_and_grad_kernel(const ProbT* const log_probs, ProbT* grad,
        const int* const labels,
        int mb, int T, int U, size_t bytes_used) {
    CpuRNNT_index idx(U, maxU_, minibatch_, alphabet_size_, batch_first);
    CpuRNNT_metadata rnntm(T, U, workspace_, bytes_used, blank_, labels, log_probs, idx);
    if (batch_first) {
        // zero grads
        memset(grad, 0, sizeof(ProbT) * maxT_ * maxU_ * alphabet_size_);
    }
    ProbT llForward = compute_alphas(rnntm.log_probs2, T, U, rnntm.alphas);
    ProbT llBackward = compute_betas_and_grad(grad, rnntm.log_probs2, T, U,
                                              rnntm.alphas,
                                              rnntm.betas,
                                              labels,
                                              llForward);
    // Forward and backward log-likelihoods should match up to rounding;
    // a large gap indicates a numerical problem, so warn (but still return).
    ProbT diff = std::abs(llForward - llBackward);
    if (diff > 1e-1) {
        printf("WARNING: Forward backward likelihood mismatch %f\n", diff);
    }
    return -llForward;
}
// Forward DP over the T x U lattice. log_probs here is the packed
// log_probs2 cache: index*2 is the blank log-prob, index*2+1 the label one.
// alpha(t,u) = logsumexp( alpha(t-1,u)+blank , alpha(t,u-1)+label ).
// Returns the total log-likelihood: alpha(T-1,U-1) + final blank.
template<typename ProbT>
ProbT
CpuRNNT<ProbT>::compute_alphas(const ProbT* const log_probs, int T, int U, ProbT* alphas) {
    CpuRNNT_index idx(U, maxU_, minibatch_, alphabet_size_, batch_first);
    alphas[0] = 0;
    for (int t = 0; t < T; ++t) {
        for (int u = 0; u < U; ++u) {
            // first row/column have a single predecessor
            if (u == 0 && t > 0)
                alphas[idx(t, 0)] = alphas[idx(t-1, 0)] + log_probs[idx(t-1, 0) * 2];
            if (t == 0 && u > 0)
                alphas[idx(0, u)] = alphas[idx(0, u-1)] + log_probs[idx(0, u-1) * 2 + 1];
            if (t > 0 && u > 0) {
                ProbT no_emit = alphas[idx(t-1, u)] + log_probs[idx(t-1, u) * 2];
                ProbT emit = alphas[idx(t, u-1)] + log_probs[idx(t, u-1) * 2 + 1];
                alphas[idx(t, u)] = rnnt_helper::log_sum_exp<ProbT>(emit, no_emit);
            }
        }
    }
#ifdef DEBUG_KERNEL
    printf("cpu alphas:\n");
    printf("%d %d\n", T, U);
    for (int t = 0; t < T; t++) {
        for (int u = 0; u < U; u++) {
            printf("%.2f ", alphas[idx(t, u)]);
        }
        printf("\n");
    }
    printf("\n");
#endif
    ProbT loglike = alphas[idx(T-1, U-1)] + log_probs[idx(T-1, U-1) * 2];
    return loglike;
}
// Backward DP plus gradient accumulation. betas mirror alphas from the end
// of the lattice; beta(T-1,U-1) starts at the final blank log-prob, so
// beta(0,0) is the full log-likelihood. Gradients w.r.t. the blank/label
// log probabilities are -exp(logprob + alpha + beta' - loglike).
// Note: the incoming logll argument is not used; loglike is recomputed
// from betas[0] — presumably intentional, but worth confirming upstream.
template<typename ProbT>
ProbT
CpuRNNT<ProbT>::compute_betas_and_grad(ProbT* grad, const ProbT* const log_probs,
        int T, int U, ProbT* alphas, ProbT* betas,
        const int* const labels, ProbT logll) {
    CpuRNNT_index idx(U, maxU_, minibatch_, alphabet_size_, batch_first);
    betas[idx(T-1, U-1)] = log_probs[idx(T-1, U-1) * 2];
    for (int t = T-1; t >= 0; --t) {
        for (int u = U-1; u >= 0; --u) {
            // last row/column have a single successor
            if (u == U-1 && t < T-1)
                betas[idx(t, U-1)] = betas[idx(t+1, U-1)] + log_probs[idx(t, U-1) * 2];
            if (t == T-1 && u < U-1)
                betas[idx(T-1, u)] = betas[idx(T-1, u+1)] + log_probs[idx(T-1, u) * 2 + 1];
            if (t < T-1 && u < U-1) {
                ProbT no_emit = betas[idx(t+1, u)] + log_probs[idx(t, u) * 2];
                ProbT emit = betas[idx(t, u+1)] + log_probs[idx(t, u) * 2 + 1];
                betas[idx(t, u)] = rnnt_helper::log_sum_exp<ProbT>(emit, no_emit);
            }
        }
    }
#ifdef DEBUG_KERNEL
    printf("cpu betas:\n");
    printf("%d %d\n", T, U);
    for (int t = 0; t < T; t++) {
        for (int u = 0; u < U; u++) {
            printf("%.2f ", betas[idx(t, u)]);
        }
        printf("\n");
    }
    printf("\n");
#endif
    ProbT loglike = betas[0];
    // Gradients w.r.t. log probabilities
    for (int t = 0; t < T; ++t) {
        for (int u = 0; u < U; ++u) {
            if (t < T-1) {
                ProbT g = alphas[idx(t, u)] + betas[idx(t+1, u)];
                grad[idx(t, u, blank_)] = -std::exp(log_probs[idx(t, u) * 2] + g - loglike);
            }
            if (u < U-1) {
                ProbT g = alphas[idx(t, u)] + betas[idx(t, u+1)];
                grad[idx(t, u, labels[u])] = -std::exp(log_probs[idx(t, u) * 2 + 1] + g - loglike);
            }
        }
    }
    // gradient to the last blank transition
    grad[idx(T-1, U-1, blank_)] = -std::exp(log_probs[idx(T-1, U-1) * 2] + alphas[idx(T-1, U-1)] - loglike);
    return loglike;
}
// Entry point for loss + gradients over a whole minibatch. Each utterance
// gets a disjoint slice of the shared workspace (mb * per_minibatch_bytes),
// so the omp-parallel loop iterations are independent.
template<typename ProbT>
rnntStatus_t
CpuRNNT<ProbT>::cost_and_grad(const ProbT* const log_probs,
        ProbT* grads,
        ProbT* costs,
        const int* const flat_labels,
        const int* const label_lengths,
        const int* const input_lengths) {
    // per minibatch memory
    size_t per_minibatch_bytes = 0;
    // alphas & betas
    per_minibatch_bytes += sizeof(ProbT) * maxT_ * maxU_ * 2;
    // blank & label log probability cache
    per_minibatch_bytes += sizeof(ProbT) * maxT_ * maxU_ * 2;
#pragma omp parallel for
    for (int mb = 0; mb < minibatch_; ++mb) {
        const int T = input_lengths[mb];     // Length of utterance (time)
        const int U = label_lengths[mb] + 1; // Number of labels in transcription
        // stride between consecutive utterances in log_probs/grads
        int batch_size = alphabet_size_;
        if (batch_first) batch_size = maxT_ * maxU_ * alphabet_size_;
        costs[mb] = cost_and_grad_kernel(log_probs + mb * batch_size,
                                         grads + mb * batch_size,
                                         flat_labels + mb * (maxU_ - 1),
                                         mb, T, U, mb * per_minibatch_bytes);
    }
    return RNNT_STATUS_SUCCESS;
}
// Scoring-only entry point: same per-utterance setup as cost_and_grad, but
// runs only the forward (alpha) pass and negates the log-likelihood.
template<typename ProbT>
rnntStatus_t
CpuRNNT<ProbT>::score_forward(const ProbT* const log_probs,
        ProbT* costs,
        const int* const flat_labels,
        const int* const label_lengths,
        const int* const input_lengths) {
    // per minibatch memory
    size_t per_minibatch_bytes = 0;
    // alphas & betas
    per_minibatch_bytes += sizeof(ProbT) * maxT_ * maxU_ * 2;
    // blank & label log probability cache
    per_minibatch_bytes += sizeof(ProbT) * maxT_ * maxU_ * 2;
#pragma omp parallel for
    for (int mb = 0; mb < minibatch_; ++mb) {
        const int T = input_lengths[mb];     // Length of utterance (time)
        const int U = label_lengths[mb] + 1; // Number of labels in transcription
        // stride between consecutive utterances in log_probs
        int batch_size = alphabet_size_;
        if (batch_first) batch_size = maxT_ * maxU_ * alphabet_size_;
        CpuRNNT_index idx(U, maxU_, minibatch_, alphabet_size_, batch_first);
        CpuRNNT_metadata rnntm(T, U, workspace_, mb * per_minibatch_bytes, blank_,
                               flat_labels + mb * (maxU_ - 1), log_probs + mb * batch_size, idx);
        costs[mb] = -compute_alphas(rnntm.log_probs2, T, U, rnntm.alphas);
    }
    return RNNT_STATUS_SUCCESS;
}
|
epir_reply_mock.c |
#include "epir.h"
#define divide_up(a, b) (((a) / (b)) + (((a) % (b)) == 0 ? 0 : 1 ))
/* Returns the byte size of a reply: each of the `dimension` encryption
 * layers packs `packing` bytes per ciphertext of EPIR_CIPHER_SIZE bytes. */
size_t epir_reply_size(const uint8_t dimension, const uint8_t packing, const size_t elem_size) {
	size_t layer_size = elem_size;
	uint8_t layer;
	for(layer = 0; layer < dimension; layer++) {
		const size_t n_ciphers = divide_up(layer_size, packing);
		layer_size = EPIR_CIPHER_SIZE * n_ciphers;
	}
	return layer_size;
}
/* Returns how many random scalars are consumed to build a reply: one per
 * ciphertext produced across all `dimension` layers. */
size_t epir_reply_r_count(const uint8_t dimension, const uint8_t packing, const size_t elem_size) {
	size_t total = 0;
	size_t layer_size = elem_size;
	uint8_t layer;
	for(layer = 0; layer < dimension; layer++) {
		const size_t n_ciphers = divide_up(layer_size, packing);
		total += n_ciphers;
		layer_size = EPIR_CIPHER_SIZE * n_ciphers;
	}
	return total;
}
/**
 * Generates a sample server reply.
 *
 * Repeatedly packs the current buffer into ciphertexts, one layer per
 * dimension. `r`, when non-NULL, supplies one EPIR_SCALAR_SIZE scalar per
 * ciphertext (epir_reply_r_count() of them in total).
 *
 * FIX: the original incremented the shared `r_offset` inside the
 * `omp parallel for` loop (a data race yielding a nondeterministic mapping
 * of scalars to ciphertexts). Each iteration consumes exactly one scalar,
 * so the index is now computed deterministically as r_offset + i and
 * r_offset is advanced once after the loop.
 * FIX: the byte was shifted as a plain int (`reply[...] << (8*j)`), which
 * is undefined for packing > 4; it is now widened to uint64_t first.
 */
static inline void epir_reply_mock_(
	unsigned char *reply,
	const unsigned char *key,
	const uint8_t dimension, const uint8_t packing,
	const uint8_t *elem, const size_t elem_size, const unsigned char *r,
	epir_ecelgamal_encrypt_fn encrypt) {
	const size_t reply_size_final = epir_reply_size(dimension, packing, elem_size);
	unsigned char *midstate = (unsigned char*)malloc(reply_size_final);
	if(midstate == NULL) return; /* allocation failure: leave reply untouched */
	memcpy(reply, elem, elem_size);
	size_t reply_size = elem_size;
	size_t r_offset = 0;
	for(size_t d=0; d<dimension; d++) {
		/* hoisted: number of ciphertexts produced by this layer */
		const size_t n_ciphers = divide_up(reply_size, packing);
		const size_t midstate_size = EPIR_CIPHER_SIZE * n_ciphers;
		#pragma omp parallel for
		for(size_t i=0; i<n_ciphers; i++) {
			uint64_t msg = 0;
			for(size_t j=0; (j<packing)&&(i*packing+j<reply_size); j++) {
				msg |= ((uint64_t)reply[i * packing + j]) << (8 * j);
			}
			encrypt(
				&midstate[i * EPIR_CIPHER_SIZE], key, msg,
				r ? &r[(r_offset + i) * EPIR_SCALAR_SIZE] : NULL);
		}
		r_offset += n_ciphers;
		memcpy(reply, midstate, midstate_size);
		reply_size = midstate_size;
	}
	free(midstate);
}
/* Public wrapper: builds a mock reply using the (slow) public-key
 * encryption path with `pubkey`. */
void epir_reply_mock(
	unsigned char *reply,
	const unsigned char *pubkey,
	const uint8_t dimension, const uint8_t packing,
	const uint8_t *elem, const size_t elem_size, const unsigned char *r) {
	epir_reply_mock_(reply, pubkey, dimension, packing, elem, elem_size, r, epir_ecelgamal_encrypt);
}
/* Public wrapper: same as epir_reply_mock but uses the fast private-key
 * encryption variant with `privkey`. */
void epir_reply_mock_fast(
	unsigned char *reply,
	const unsigned char *privkey,
	const uint8_t dimension, const uint8_t packing,
	const uint8_t *elem, const size_t elem_size, const unsigned char *r) {
	epir_reply_mock_(reply, privkey, dimension, packing, elem, elem_size, r, epir_ecelgamal_encrypt_fast);
}
|
mattran.c | #include "matrix.h"
/** \brief Computes the transpose of a matrix
 *
 * \param[in] A Input matrix
 * \param[in] result Matrix to store the result (allocated here when NULL)
 * \return \f$ \mathbf{A}^T \f$
 *
 */
MATRIX mat_tran(MATRIX A, MATRIX result)
{
    int row, col, acols, arows;
    acols = MatCol(A);
    arows = MatRow(A);
    /* When no destination was supplied, allocate an acols x arows matrix. */
    if (result == NULL)
    {
        result = mat_creat(acols, arows, UNDEFINED);
        if (result == NULL)
            return mat_error(MAT_MALLOC);
    }
    /* Rows of the transpose are independent, so fill them in parallel. */
    #pragma omp parallel for private(col)
    for (row = 0; row < acols; ++row)
    {
        for (col = 0; col < arows; ++col)
        {
            result[row][col] = A[col][row];
        }
    }
    return result;
}
|
cloudkeychain_fmt_plug.c | /* 1Password Cloud Keychain cracker patch for JtR. Hacked together during
* April of 2013 by Dhiru Kholia <dhiru.kholia at gmail.com>.
*
* This software is Copyright (c) 2013 Dhiru Kholia <dhiru.kholia at gmail.com>,
* Copyright (c) 2012 Lukas Odzioba <ukasz@openwall.net> and Copyright (c) 2012
* magnum, and it is hereby released to the general public under the following
* terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* This software is based on "onepasswordpy" project but no actual code is
* borrowed from it.
*
* "onepasswordpy" project is at https://github.com/Roguelazer/onepasswordpy
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_cloud_keychain;
#elif FMT_REGISTERS_H
john_register_one(&fmt_cloud_keychain);
#else
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "johnswap.h"
#include "sha2.h"
#include "pbkdf2_hmac_sha512.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 1
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "cloudkeychain"
#define FORMAT_NAME "1Password Cloud Keychain"
#define FORMAT_TAG "$cloudkeychain$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#ifdef SIMD_COEF_64
#define ALGORITHM_NAME "PBKDF2-SHA512 " SHA512_ALGORITHM_NAME
#else
#define ALGORITHM_NAME "PBKDF2-SHA512 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define HASH_LENGTH 64
#define BINARY_SIZE 0
#define BINARY_ALIGN 1
#define PLAINTEXT_LENGTH 111
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN 4
#ifdef SIMD_COEF_64
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA512
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA512
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#define SALTLEN 32
#define IVLEN 16
#define CTLEN 2048
#define EHMLEN 32
#define PAD_SIZE 128
static struct fmt_tests cloud_keychain_tests[] = {
{"$cloudkeychain$16$2e57e8b57eda4d99df2fe02324960044$227272$336$6f706461746130310001000000000000881d65af6b863f6678d484ff551bc843a95faf289b914e570a1993353789b66a9c6bd40b42c588923e8869862339d06ef3d5c091c0ba997a704619b3ffc121b4b126071e9e0a0812f722f95a2d7b80c22bc91fc237cb3dfaba1bee1c9d3cb4c94332335ab203bb0f07ca774c19729ce8182f91cd228ae18fb82b17535ecae012f14904a6ace90d9bab1d934eb957ea98a68b4b2db3c8e02d27f7aff9203cdbd91c2b7c6aaa6f9c2ca3c1d5f976fc9ed86b80082ae3e39c2f30a35d26c2c14dbd64386be9b5ae40851824dc5963b54703ba17d20b424deaaa452793a1ef8418db2dda669b064075e450404a46433f6533dfe0a13b34fa1f55238ffea5062a4f22e821b9e99639c9d0ece27df65caf0aaaad7200b0187e7b3134107e38582ef73b6fde10044103924d8275bf9bfadc98540ae61c5e59be06c5bca981460345bd29$256$16$881d65af6b863f6678d484ff551bc843$272$a95faf289b914e570a1993353789b66a9c6bd40b42c588923e8869862339d06ef3d5c091c0ba997a704619b3ffc121b4b126071e9e0a0812f722f95a2d7b80c22bc91fc237cb3dfaba1bee1c9d3cb4c94332335ab203bb0f07ca774c19729ce8182f91cd228ae18fb82b17535ecae012f14904a6ace90d9bab1d934eb957ea98a68b4b2db3c8e02d27f7aff9203cdbd91c2b7c6aaa6f9c2ca3c1d5f976fc9ed86b80082ae3e39c2f30a35d26c2c14dbd64386be9b5ae40851824dc5963b54703ba17d20b424deaaa452793a1ef8418db2dda669b064075e450404a46433f6533dfe0a13b34fa1f55238ffea5062a4f22e821b9e99639c9d0ece27df65caf0aaaad7200b0187e7b3134107e38582ef73b$32$6fde10044103924d8275bf9bfadc98540ae61c5e59be06c5bca981460345bd29$304$6f706461746130310001000000000000881d65af6b863f6678d484ff551bc843a95faf289b914e570a1993353789b66a9c6bd40b42c588923e8869862339d06ef3d5c091c0ba997a704619b3ffc121b4b126071e9e0a0812f722f95a2d7b80c22bc91fc237cb3dfaba1bee1c9d3cb4c94332335ab203bb0f07ca774c19729ce8182f91cd228ae18fb82b17535ecae012f14904a6ace90d9bab1d934eb957ea98a68b4b2db3c8e02d27f7aff9203cdbd91c2b7c6aaa6f9c2ca3c1d5f976fc9ed86b80082ae3e39c2f30a35d26c2c14dbd64386be9b5ae40851824dc5963b54703ba17d20b424deaaa452793a1ef8418db2dda669b064075e450404a46433f6533dfe0a13b34fa1f55238ffea5062a4f22e821b9e99639c9d0ece27df65caf0aaaad7200b0187e7b3134107e3858
2ef73b", "fred"},
// https://cache.agilebits.com/security-kb/freddy-2013-12-04.tar.gz, This is a sample OPVault file. The Master Password for it is freddy.
{"$cloudkeychain$16$3f4a4e30c37a3b0e7020a38e4ac69242$50000$336$6f706461746130310001000000000000237c26e13beb237a85b8eacc4bddd111a7bb7bee7cf71f019df9268cb3751d563d1bebf0331e7def4c26eeb90e61d2c2339b3c2d23ce75e969f250a1be823732823687950be19722f2dc92f02e614352c082d04358c421c1ddc90d07d8c6c9fb46255846ef950f14547e5b72b32a0e64cf3d24646d41b7fdd57534a1dd808d15e8dfe4299ef7ee8a3e923dc28496504cacb0be647a4600797ade6cb41694c2eb4d41b674ce762d66e98895fde98dda862b84720874b09b080b50ef9514b4ea0e3a19f5d51ccb8850cd26623e56dadef2bcbc625194dd107f663a7548f991803075874ecc4fc98b785b4cd56c3ce9bcb23ccf70f1908fc85a5b9520cd20d9d26a3bfb29ac289c1262302c82f6b0877d566369b98fb551fb9d044434c4cb1c50dcb5bb5a07ad0315fd9742d7d0edc9b9ed685bfa76978e228fdaa237dae4152731$256$16$237c26e13beb237a85b8eacc4bddd111$272$a7bb7bee7cf71f019df9268cb3751d563d1bebf0331e7def4c26eeb90e61d2c2339b3c2d23ce75e969f250a1be823732823687950be19722f2dc92f02e614352c082d04358c421c1ddc90d07d8c6c9fb46255846ef950f14547e5b72b32a0e64cf3d24646d41b7fdd57534a1dd808d15e8dfe4299ef7ee8a3e923dc28496504cacb0be647a4600797ade6cb41694c2eb4d41b674ce762d66e98895fde98dda862b84720874b09b080b50ef9514b4ea0e3a19f5d51ccb8850cd26623e56dadef2bcbc625194dd107f663a7548f991803075874ecc4fc98b785b4cd56c3ce9bcb23ccf70f1908fc85a5b9520cd20d9d26a3bfb29ac289c1262302c82f6b0877d566369b98fb551fb9d044434c4cb1c50dc$32$b5bb5a07ad0315fd9742d7d0edc9b9ed685bfa76978e228fdaa237dae4152731$304$6f706461746130310001000000000000237c26e13beb237a85b8eacc4bddd111a7bb7bee7cf71f019df9268cb3751d563d1bebf0331e7def4c26eeb90e61d2c2339b3c2d23ce75e969f250a1be823732823687950be19722f2dc92f02e614352c082d04358c421c1ddc90d07d8c6c9fb46255846ef950f14547e5b72b32a0e64cf3d24646d41b7fdd57534a1dd808d15e8dfe4299ef7ee8a3e923dc28496504cacb0be647a4600797ade6cb41694c2eb4d41b674ce762d66e98895fde98dda862b84720874b09b080b50ef9514b4ea0e3a19f5d51ccb8850cd26623e56dadef2bcbc625194dd107f663a7548f991803075874ecc4fc98b785b4cd56c3ce9bcb23ccf70f1908fc85a5b9520cd20d9d26a3bfb29ac289c1262302c82f6b0877d566369b98fb551fb9d044434c4cb1
c50dc", "freddy"},
{NULL}
};
#if defined (_OPENMP)
static int omp_t = 1;
#endif
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int *cracked;
static struct custom_salt {
unsigned int saltlen;
unsigned char salt[SALTLEN];
unsigned int iterations;
unsigned int masterkeylen;
unsigned char masterkey[CTLEN];
unsigned int plaintextlen;
unsigned int ivlen;
unsigned char iv[32];
unsigned int cryptextlen;
unsigned char cryptext[CTLEN];
unsigned int expectedhmaclen;
unsigned char expectedhmac[EHMLEN];
unsigned int hmacdatalen;
unsigned char hmacdata[CTLEN];
} *cur_salt;
/* Format init: scales keys-per-crypt by the OpenMP thread count and
 * allocates the shared plaintext and result buffers. */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	/* OMP_SCALE over-subscribes threads to hide scheduling latency */
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	cracked = mem_calloc(self->params.max_keys_per_crypt,
	                     sizeof(*cracked));
}
/* Releases the buffers allocated in init(). */
static void done(void)
{
	MEM_FREE(cracked);
	MEM_FREE(saved_key);
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *ctcopy, *keeptr, *p;
int len, extra;
if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0)
return 0;
ctcopy = strdup(ciphertext);
keeptr = ctcopy;
ctcopy += FORMAT_TAG_LEN;
if ((p = strtokm(ctcopy, "$")) == NULL) /* salt length */
goto err;
if (!isdec(p))
goto err;
len = atoi(p);
if ((p = strtokm(NULL, "$")) == NULL) /* salt */
goto err;
if (hexlenl(p, &extra)/2 != len || extra)
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* iterations */
goto err;
if (!isdecu(p))
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* masterkey length */
goto err;
if (!isdec(p))
goto err;
len = atoi(p);
if ((p = strtokm(NULL, "$")) == NULL) /* masterkey */
goto err;
if (hexlenl(p, &extra)/2 != len || extra)
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* plaintext length */
goto err;
if (!isdecu(p))
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* iv length */
goto err;
if (!isdec(p))
goto err;
len = atoi(p);
if (len > IVLEN)
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* iv */
goto err;
if (hexlenl(p, &extra) / 2 != len || extra)
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* cryptext length */
goto err;
if (!isdec(p))
goto err;
len = atoi(p);
if (len > CTLEN)
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* cryptext */
goto err;
if (hexlenl(p, &extra)/2 != len || extra)
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* expectedhmac length */
goto err;
if (!isdec(p))
goto err;
len = atoi(p);
if (len > EHMLEN)
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* expectedhmac */
goto err;
if (hexlenl(p, &extra)/2 != len || extra)
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* hmacdata length */
goto err;
if (!isdec(p))
goto err;
len = atoi(p);
if (len > CTLEN)
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* hmacdata */
goto err;
if (hexlenl(p, &extra)/2 != len || extra)
goto err;
MEM_FREE(keeptr);
return 1;
err:
MEM_FREE(keeptr);
return 0;
}
/* Parses a ciphertext line into a struct custom_salt. Field lengths come
 * straight from the line and are used as copy bounds, so valid() is relied
 * upon to have vetted them first (NOTE(review): confirm valid() bounds
 * every length against its destination buffer). Returns a pointer to a
 * static buffer, as the JtR salt API expects. */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	int i;
	char *p;
	static struct custom_salt cs;
	memset(&cs, 0, sizeof(cs));
	ctcopy += FORMAT_TAG_LEN;	/* skip over "$cloudkeychain$" */
	p = strtokm(ctcopy, "$");
	cs.saltlen = atoi(p);
	p = strtokm(NULL, "$");
	/* each field is decoded from hex, two chars per byte */
	for (i = 0; i < cs.saltlen; i++)
		cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "$");
	cs.iterations = atou(p);
	p = strtokm(NULL, "$");
	cs.masterkeylen = atoi(p);
	p = strtokm(NULL, "$");
	for (i = 0; i < cs.masterkeylen; i++)
		cs.masterkey[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "$");
	cs.plaintextlen = atou(p);
	p = strtokm(NULL, "$");
	cs.ivlen = atoi(p);
	p = strtokm(NULL, "$");
	for (i = 0; i < cs.ivlen; i++)
		cs.iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "$");
	cs.cryptextlen = atoi(p);
	p = strtokm(NULL, "$");
	for (i = 0; i < cs.cryptextlen; i++)
		cs.cryptext[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "$");
	cs.expectedhmaclen = atoi(p);
	p = strtokm(NULL, "$");
	for (i = 0; i < cs.expectedhmaclen; i++)
		cs.expectedhmac[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "$");
	cs.hmacdatalen = atoi(p);
	p = strtokm(NULL, "$");
	for (i = 0; i < cs.hmacdatalen; i++)
		cs.hmacdata[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	MEM_FREE(keeptr);
	return (void *)&cs;
}
/* Installs the salt that subsequent crypt_all()/ckcdecrypt() calls use. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}
/* HMAC-SHA256(pass, salt [ || add ]) written into ret (32 bytes).
 * `add`, when nonzero, is appended as a 4-byte big-endian word.
 * Assumes passlen <= 64 (callers pass 32-byte keys), since the pads are a
 * single 64-byte SHA-256 block. */
static void hmac_sha256(uint8_t * pass, uint8_t passlen, uint8_t * salt,
                        uint32_t saltlen, uint32_t add, uint64_t * ret)
{
	uint8_t i, ipad[64], opad[64];
	SHA256_CTX ctx;
	memset(ipad, 0x36, 64);
	memset(opad, 0x5c, 64);
	for (i = 0; i < passlen; i++) {
		ipad[i] ^= pass[i];
		opad[i] ^= pass[i];
	}
	/* inner hash: H(ipad || salt [|| add]) */
	SHA256_Init(&ctx);
	SHA256_Update(&ctx, ipad, 64);
	SHA256_Update(&ctx, salt, saltlen);
	if (add > 0) {
#if ARCH_LITTLE_ENDIAN
		add = JOHNSWAP(add);	/* store big-endian */
#endif
		SHA256_Update(&ctx, &add, 4); }
	SHA256_Final((uint8_t *) ret, &ctx);
	/* outer hash: H(opad || inner) */
	SHA256_Init(&ctx);
	SHA256_Update(&ctx, opad, 64);
	SHA256_Update(&ctx, (uint8_t *) ret, 32);
	SHA256_Final((uint8_t *) ret, &ctx);
}
/* Checks a derived 64-byte key: the upper 32 bytes are the HMAC key; the
 * candidate is accepted when HMAC-SHA256 over the stored hmacdata matches
 * the expected HMAC from the hash line. Returns 1 on match, 0 otherwise. */
static int ckcdecrypt(unsigned char *key)
{
	uint64_t computed[8];
	hmac_sha256(key + 32, 32, cur_salt->hmacdata, cur_salt->hmacdatalen, 0, computed);
	return memcmp(computed, cur_salt->expectedhmac, 32) == 0;
}
/* Derives a 64-byte key per candidate with PBKDF2-HMAC-SHA512 and verifies
 * it via ckcdecrypt(). Uses the SIMD multi-lane path when SSE_GROUP_SZ_SHA512
 * is defined, otherwise one candidate at a time; candidates are split
 * across OpenMP threads in groups of MAX_KEYS_PER_CRYPT. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
	{
#ifdef SSE_GROUP_SZ_SHA512
		/* gather the lane inputs, then run all lanes in one SIMD call */
		int lens[SSE_GROUP_SZ_SHA512], i;
		unsigned char *pin[SSE_GROUP_SZ_SHA512];
		uint64_t key[SSE_GROUP_SZ_SHA512][8];
		union {
			uint32_t *pout[SSE_GROUP_SZ_SHA512];
			unsigned char *poutc;
		} x;
		for (i = 0; i < SSE_GROUP_SZ_SHA512; ++i) {
			lens[i] = strlen(saved_key[index+i]);
			pin[i] = (unsigned char*)saved_key[index+i];
			x.pout[i] = (uint32_t*)(key[i]);
		}
		pbkdf2_sha512_sse((const unsigned char **)pin, lens, cur_salt->salt, cur_salt->saltlen, cur_salt->iterations, &(x.poutc), HASH_LENGTH, 0);
		for (i = 0; i < SSE_GROUP_SZ_SHA512; ++i)
			cracked[index+i] = ckcdecrypt((unsigned char*)(key[i]));
#else
		uint64_t key[8];
		pbkdf2_sha512((const unsigned char*)(saved_key[index]), strlen(saved_key[index]),
		              cur_salt->salt, cur_salt->saltlen,
		              cur_salt->iterations, (unsigned char*)key, HASH_LENGTH, 0);
		cracked[index] = ckcdecrypt((unsigned char*)key);
#endif
	}
	return count;
}
/* Returns 1 as soon as any candidate in this batch verified, else 0. */
static int cmp_all(void *binary, int count)
{
	int i;
	for (i = 0; i < count; i++) {
		if (cracked[i])
			return 1;
	}
	return 0;
}
/* Per-candidate check: the result was already computed in crypt_all(). */
static int cmp_one(void *binary, int index)
{
	return cracked[index];
}
/* No further verification is possible beyond the HMAC check; always 1. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/* Stores a candidate password, truncating to PLAINTEXT_LENGTH and always
 * NUL-terminating (saved_key rows are PLAINTEXT_LENGTH + 1 bytes). */
static void cloud_keychain_set_key(char *key, int index)
{
	int saved_len = strlen(key);
	if (saved_len > PLAINTEXT_LENGTH)
		saved_len = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, saved_len);
	saved_key[index][saved_len] = 0;
}
/* Returns the stored (possibly truncated) candidate password. */
static char *get_key(int index)
{
	return saved_key[index];
}
/* Cost metric hook: exposes the salt's PBKDF2 iteration count. */
static unsigned int iteration_count(void *salt)
{
	return (unsigned int)((struct custom_salt *)salt)->iterations;
}
/* Format descriptor registered with John the Ripper: static parameters
 * followed by the method table wired to the functions above. */
struct fmt_main fmt_cloud_keychain = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT,
		{
			"iteration count",
		},
		{ FORMAT_TAG },
		cloud_keychain_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		fmt_default_binary,
		get_salt,
		{
			iteration_count,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		cloud_keychain_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
gpu_reduce_demo.c | //------------------------------------------------------------------------------
// GraphBLAS/Demo/Program/reduce_demo: reduce a matrix to a scalar
//------------------------------------------------------------------------------
// TODO for GPU: add this to CMakelists.txt, or merge with reduce_demo.c
#include "GraphBLAS.h"
// #define N 65536
#define N 16384
// Demo: builds an N-by-N matrix and times GrB_reduce to a scalar at
// 1..nthreads_max threads.
// FIX: the warmup matrix B was never freed (GrB_free (&B) added);
// the three N*N tuple-array mallocs were unchecked; t1 was uninitialized.
int main (void)
{
    #if defined ( _OPENMP )
    double t0 = omp_get_wtime ( ) ;
    #endif
    // start GraphBLAS
    GrB_init (GrB_NONBLOCKING) ;
    printf ("demo: reduce a matrix to a scalar\n") ;
    int nthreads_max ;
    GxB_get (GxB_NTHREADS, &nthreads_max) ;
    printf ("# of threads: %d\n", nthreads_max) ;
    #if defined ( _OPENMP )
    t0 = omp_get_wtime ( ) - t0 ;
    printf ("GPU warmup time: %g\n", t0) ;
    t0 = omp_get_wtime ( ) ;
    #endif
    GrB_Index nrows = N ;
    GrB_Index ncols = N ;
    GrB_Matrix A ;
    GrB_Matrix_new (&A, GrB_INT64, nrows, ncols) ;
    // tuple lists for all N*N entries, filled in parallel below
    GrB_Index *I = malloc (nrows * ncols * sizeof (GrB_Index)) ;
    GrB_Index *J = malloc (nrows * ncols * sizeof (GrB_Index)) ;
    int64_t *X = malloc (nrows * ncols * sizeof (int64_t)) ;
    // FIX: check the three large allocations before using them
    if (I == NULL || J == NULL || X == NULL)
    {
        printf ("out of memory\n") ;
        free (I) ;
        free (J) ;
        free (X) ;
        GrB_free (&A) ;
        GrB_finalize ( ) ;
        return (1) ;
    }
    #pragma omp parallel for num_threads(nthreads_max) collapse(2) \
        schedule(static)
    for (int64_t i = 0 ; i < nrows ; i++)
    {
        for (int64_t j = 0 ; j < ncols ; j++)
        {
            int64_t k = i * N + j ;
            // int x = (int) (rand ( ) & 0xFF) ;
            int x = (int) (k & 0xFF) ;
            I [k] = i ;
            J [k] = j ;
            X [k] = x ;
        }
    }
    GrB_Index nvals = ((size_t) N) * ((size_t) N) ;
    GrB_Matrix_build (A, I, J, X, nvals, GrB_PLUS_INT64) ;
    GxB_print (A, 2) ;
    free (I) ;
    free (J) ;
    free (X) ;
    #if defined ( _OPENMP )
    t0 = omp_get_wtime ( ) - t0 ;
    printf ("time to create matrix: %g\n", t0) ;
    #endif
    GrB_Index result ;
    // small dense warmup matrix, reduced once on all threads
    GrB_Matrix B ;
    GrB_Matrix_new (&B, GrB_INT64, 2000, 2000) ;
    for (int64_t i = 0 ; i < 2000 ; i++)
    {
        for (int64_t j = 0 ; j < 2000 ; j++)
        {
            GrB_Matrix_setElement (B, 1, i, j) ;
        }
    }
    GrB_Index ignore ;
    GrB_Matrix_nvals (&ignore, B) ;    // force completion of pending work
    #if defined ( _OPENMP )
    double tfirst = omp_get_wtime ( ) ;
    #endif
    GrB_reduce (&result, NULL, GxB_PLUS_INT64_MONOID, B, NULL) ;
    #if defined ( _OPENMP )
    tfirst = omp_get_wtime ( ) - tfirst ;
    printf ("warmup %g sec (on all threads/gpu, small matrix)\n", tfirst) ;
    printf ("result: %"PRIu64"\n", result) ;
    #endif
    // FIX: free the warmup matrix (was leaked)
    GrB_free (&B) ;
    double t1 = 0 ;    // FIX: initialized (set on the nthreads == 1 pass)
    printf ("\nreduce to a scalar:\n") ;
    for (int nthreads = 1 ; nthreads <= nthreads_max ; nthreads++)
    {
        GxB_set (GxB_NTHREADS, nthreads) ;
        #if defined ( _OPENMP )
        double t = omp_get_wtime ( ) ;
        #endif
        GrB_reduce (&result, NULL, GxB_PLUS_INT64_MONOID, A, NULL) ;
        #if defined ( _OPENMP )
        t = omp_get_wtime ( ) - t ;
        if (nthreads == 1) t1 = t ;
        printf ("nthreads %3d time: %12.6f speedup %8.2f\n",
            nthreads, t, t1/t) ;
        #endif
    }
    // printf ("result %d\n", result) ;
    printf ("result %"PRId64"\n", (int64_t) result) ;
    // free everything
    GrB_free (&A) ;
    GrB_finalize ( ) ;
    return (0) ;
}
|
GB_binop__times_fc64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__times_fc64)
// A.*B function (eWiseMult): GB (_AemultB_08__times_fc64)
// A.*B function (eWiseMult): GB (_AemultB_02__times_fc64)
// A.*B function (eWiseMult): GB (_AemultB_04__times_fc64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__times_fc64)
// A*D function (colscale): GB (_AxD__times_fc64)
// D*A function (rowscale): GB (_DxB__times_fc64)
// C+=B function (dense accum): GB (_Cdense_accumB__times_fc64)
// C+=b function (dense accum): GB (_Cdense_accumb__times_fc64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__times_fc64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__times_fc64)
// C=scalar+B GB (_bind1st__times_fc64)
// C=scalar+B' GB (_bind1st_tran__times_fc64)
// C=A+scalar GB (_bind2nd__times_fc64)
// C=A'+scalar GB (_bind2nd_tran__times_fc64)
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// A pattern? 0
// B type: GxB_FC64_t
// B pattern? 0
// BinaryOp: cij = GB_FC64_mul (aij, bij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_BTYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
GxB_FC64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
GxB_FC64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_FC64_mul (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_TIMES || GxB_NO_FC64 || GxB_NO_TIMES_FC64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense.  The entire body comes from
// the shared template, specialized by the GB_* macros defined above
// (GB_BINOP expands to GB_FC64_mul).
void GB (_Cdense_ewise3_accum__times_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads          // # of OpenMP threads to use
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense (no accumulation into C).
// Body supplied by the shared template, specialized by the GB_* macros above.
void GB (_Cdense_ewise3_noaccum__times_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads          // # of OpenMP threads to use
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, using the
// pre-computed slicing of B (B_ek_slicing) across B_ntasks/B_nthreads.
GrB_Info GB (_Cdense_accumB__times_fc64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
// kernel compiled out (see GB_DISABLE above); caller uses the generic method
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C.
GrB_Info GB (_Cdense_accumb__times_fc64)
(
GrB_Matrix C,
const GB_void *p_bwork,     // points to the scalar b, of type GxB_FC64_t
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type GxB_FC64_t
GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable -- the inner block above always returns first
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB (_AxD__times_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx is consumed by the included template
GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__times_fc64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx is consumed by the included template
GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd / eWiseUnion: C=A+B, C<M>=A+B, or C<!M>=A+B, with the TIMES_FC64
// operator applied where A and B intersect.  When is_eWiseUnion is true,
// alpha_scalar/beta_scalar substitute for missing entries of A and B.
GrB_Info GB (_AaddB__times_fc64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace slicings, allocated/freed by the template and GB_FREE_WORKSPACE
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
GxB_FC64_t alpha_scalar ;
GxB_FC64_t beta_scalar ;
if (is_eWiseUnion)
{
// unpack the typed scalars; only read in the eWiseUnion case
alpha_scalar = (*((GxB_FC64_t *) alpha_scalar_in)) ;
beta_scalar = (*((GxB_FC64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is
// sparse/hypersparse.
GrB_Info GB (_AemultB_08__times_fc64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  GB_BINOP_FLIP is 0 for TIMES (commutative), so the
// flipxy argument needs no special handling here.
GrB_Info GB (_AemultB_02__times_fc64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B with M sparse/hyper, A and B bitmap/full.
GrB_Info GB (_AemultB_04__times_fc64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult where the result C is held as a bitmap matrix.
GrB_Info GB (_AemultB_bitmap__times_fc64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = fmult (x, Bx [p]) for each entry present in B: apply TIMES_FC64
// with the scalar bound as the first operand.  Entries absent per the Bb
// bitmap (GBB) are skipped.  Cx and Bx may be aliased.
GrB_Info GB (_bind1st__times_fc64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t *Cz = (GxB_FC64_t *) Cx_output ;
    GxB_FC64_t xscalar = (*((GxB_FC64_t *) x_input)) ;
    GxB_FC64_t *Bz = (GxB_FC64_t *) Bx_input ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        if (GBB (Bb, k))
        {
            GxB_FC64_t bval = GBX (Bz, k, false) ;
            Cz [k] = GB_FC64_mul (xscalar, bval) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = fmult (Ax [p], y) for each entry present in A: apply TIMES_FC64
// with the scalar bound as the second operand.  Entries absent per the Ab
// bitmap (GBB) are skipped.  Cx and Ax may be aliased.
GrB_Info GB (_bind2nd__times_fc64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t *Cz = (GxB_FC64_t *) Cx_output ;
    GxB_FC64_t *Az = (GxB_FC64_t *) Ax_input ;
    GxB_FC64_t yscalar = (*((GxB_FC64_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        if (GBB (Ab, k))
        {
            GxB_FC64_t aval = GBX (Az, k, false) ;
            Cz [k] = GB_FC64_mul (aval, yscalar) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC64_mul (x, aij) ; \
}
// C = op (x, A'): transpose A while applying cij = GB_FC64_mul (x, aij),
// via the GB_CAST_OP macro defined just above and the transpose template.
GrB_Info GB (_bind1st_tran__times_fc64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows (here it is the same type,
// since A and B have identical types for this operator)
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC64_mul (aij, y) ; \
}
// C = op (A', y): transpose A while applying cij = GB_FC64_mul (aij, y),
// via the GB_CAST_OP macro defined just above and the transpose template.
GrB_Info GB (_bind2nd_tran__times_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
ft.c | /*--------------------------------------------------------------------
NAS Parallel Benchmarks 3.0 structured OpenMP C versions - FT
This benchmark is an OpenMP C version of the NPB FT code.
The OpenMP C 2.3 versions are derived by RWCP from the serial Fortran versions
in "NPB 2.3-serial" developed by NAS. 3.0 translation is performed by the UVSQ.
Permission to use, copy, distribute and modify this software for any
purpose with or without fee is hereby granted.
This software is provided "as is" without express or implied warranty.
Information on OpenMP activities at RWCP is available at:
http://pdplab.trc.rwcp.or.jp/pdperf/Omni/
Information on NAS Parallel Benchmarks 2.3 is available at:
http://www.nas.nasa.gov/NAS/NPB/
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
Authors: D. Bailey
W. Saphir
OpenMP C version: S. Satoh
3.0 structure translation: M. Popov
--------------------------------------------------------------------*/
#include "../common/npb-C.h"
/* global variables */
#include "global.h"
/* function declarations */
static void evolve(dcomplex u0[NZ][NY][NX], dcomplex u1[NZ][NY][NX],
int t, int indexmap[NZ][NY][NX], int d[3]);
static void compute_initial_conditions(dcomplex u0[NZ][NY][NX], int d[3]);
static void ipow46(double a, int exponent, double *result);
static void setup(void);
static void compute_indexmap(int indexmap[NZ][NY][NX], int d[3]);
static void print_timers(void);
static void fft(int dir, dcomplex x1[NZ][NY][NX], dcomplex x2[NZ][NY][NX]);
static void cffts1(int is, int d[3], dcomplex x[NZ][NY][NX],
dcomplex xout[NZ][NY][NX],
dcomplex y0[NX][FFTBLOCKPAD],
dcomplex y1[NX][FFTBLOCKPAD]);
static void cffts2(int is, int d[3], dcomplex x[NZ][NY][NX],
dcomplex xout[NZ][NY][NX],
dcomplex y0[NX][FFTBLOCKPAD],
dcomplex y1[NX][FFTBLOCKPAD]);
static void cffts3(int is, int d[3], dcomplex x[NZ][NY][NX],
dcomplex xout[NZ][NY][NX],
dcomplex y0[NX][FFTBLOCKPAD],
dcomplex y1[NX][FFTBLOCKPAD]);
static void fft_init (int n);
static void cfftz (int is, int m, int n, dcomplex x[NX][FFTBLOCKPAD],
dcomplex y[NX][FFTBLOCKPAD]);
static void fftz2 (int is, int l, int m, int n, int ny, int ny1,
dcomplex u[NX], dcomplex x[NX][FFTBLOCKPAD],
dcomplex y[NX][FFTBLOCKPAD]);
static int ilog2(int n);
static void checksum(int i, dcomplex u1[NZ][NY][NX], int d[3]);
static void verify (int d1, int d2, int d3, int nt,
boolean *verified, char *class);
/*--------------------------------------------------------------------
c FT benchmark
c-------------------------------------------------------------------*/
int main(int argc, char **argv) {
/*c-------------------------------------------------------------------
c FT benchmark driver: warm-up pass, timed FFT/evolve/checksum loop,
c verification, and results report.
c-------------------------------------------------------------------*/
/* NOTE(review): ierr is declared but never used */
int i, ierr;
/*------------------------------------------------------------------
c u0, u1, u2 are the main arrays in the problem.
c Depending on the decomposition, these arrays will have different
c dimensions. To accomodate all possibilities, we allocate them as
c one-dimensional arrays and pass them to subroutines for different
c views
c - u0 contains the initial (transformed) initial condition
c - u1 and u2 are working arrays
c - indexmap maps i,j,k of u0 to the correct i^2+j^2+k^2 for the
c time evolution operator.
c-----------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Large arrays are in common so that they are allocated on the
c heap rather than the stack. This common block is not
c referenced directly anywhere else. Padding is to avoid accidental
c cache problems, since all array sizes are powers of two.
c-------------------------------------------------------------------*/
static dcomplex u0[NZ][NY][NX];
static dcomplex pad1[3];
static dcomplex u1[NZ][NY][NX];
static dcomplex pad2[3];
static dcomplex u2[NZ][NY][NX];
static dcomplex pad3[3];
static int indexmap[NZ][NY][NX];
int iter;
int nthreads = 1;
double total_time, mflops;
boolean verified;
char class;
/*--------------------------------------------------------------------
c Run the entire problem once to make sure all data is touched.
c This reduces variable startup costs, which is important for such a
c short benchmark. The other NPB 2 implementations are similar.
c-------------------------------------------------------------------*/
for (i = 0; i < T_MAX; i++) {
timer_clear(i);
}
setup();
compute_indexmap(indexmap, dims[2]);
compute_initial_conditions(u1, dims[0]);
fft_init (dims[0][0]);
fft(1, u1, u0);
/*--------------------------------------------------------------------
c Start over from the beginning. Note that all operations must
c be timed, in contrast to other benchmarks.
c-------------------------------------------------------------------*/
for (i = 0; i < T_MAX; i++) {
timer_clear(i);
}
timer_start(T_TOTAL);
if (TIMERS_ENABLED == TRUE) timer_start(T_SETUP);
compute_indexmap(indexmap, dims[2]);
compute_initial_conditions(u1, dims[0]);
fft_init (dims[0][0]);
if (TIMERS_ENABLED == TRUE) {
timer_stop(T_SETUP);
}
if (TIMERS_ENABLED == TRUE) {
timer_start(T_FFT);
}
/* forward transform of the initial conditions: u1 -> u0 */
fft(1, u1, u0);
if (TIMERS_ENABLED == TRUE) {
timer_stop(T_FFT);
}
/* main timed iteration loop: evolve in Fourier space, inverse
transform, then checksum the result */
for (iter = 1; iter <= niter; iter++) {
if (TIMERS_ENABLED == TRUE) {
timer_start(T_EVOLVE);
}
evolve(u0, u1, iter, indexmap, dims[0]);
if (TIMERS_ENABLED == TRUE) {
timer_stop(T_EVOLVE);
}
if (TIMERS_ENABLED == TRUE) {
timer_start(T_FFT);
}
fft(-1, u1, u2);
if (TIMERS_ENABLED == TRUE) {
timer_stop(T_FFT);
}
if (TIMERS_ENABLED == TRUE) {
timer_start(T_CHECKSUM);
}
checksum(iter, u2, dims[0]);
if (TIMERS_ENABLED == TRUE) {
timer_stop(T_CHECKSUM);
}
}
verify(NX, NY, NZ, niter, &verified, &class);
/* record the actual OpenMP thread count for the report */
#pragma omp parallel
{
#if defined(_OPENMP)
#pragma omp master
nthreads = omp_get_num_threads();
#endif /* _OPENMP */
} /* end parallel */
timer_stop(T_TOTAL);
total_time = timer_read(T_TOTAL);
/* operation-count model from the NPB specification */
if( total_time != 0.0) {
mflops = 1.0e-6*(double)(NTOTAL) *
(14.8157+7.19641*log((double)(NTOTAL))
+ (5.23518+7.21113*log((double)(NTOTAL)))*niter)
/total_time;
} else {
mflops = 0.0;
}
c_print_results("FT", class, NX, NY, NZ, niter, nthreads,
total_time, mflops, " floating point", verified,
NPBVERSION, COMPILETIME,
CS1, CS2, CS3, CS4, CS5, CS6, CS7);
if (TIMERS_ENABLED == TRUE) print_timers();
/* NOTE(review): main falls off the end; C99 implicitly returns 0 */
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c evolve u0 -> u1 (t time steps) in fourier space: each entry of u0 is
c multiplied by the precomputed real exponential ex[t*indexmap[...]].
c-------------------------------------------------------------------*/
static void evolve(dcomplex u0[NZ][NY][NX], dcomplex u1[NZ][NY][NX],
int t, int indexmap[NZ][NY][NX], int d[3]) {
    int ix, iy, iz;
    #pragma omp parallel for default(shared) private(ix,iy,iz)
    for (iz = 0; iz < d[2]; iz++) {
        for (iy = 0; iy < d[1]; iy++) {
            for (ix = 0; ix < d[0]; ix++) {
                crmul(u1[iz][iy][ix], u0[iz][iy][ix],
                      ex[t * indexmap[iz][iy][ix]]);
            }
        }
    }
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void compute_initial_conditions(dcomplex u0[NZ][NY][NX], int d[3]) {
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Fill in array u0 with initial conditions from
c random number generator
c-------------------------------------------------------------------*/
int k;
double x0, start, an, dummy;
/* scratch buffer for one z-plane of random values (real,imag pairs) */
static double tmp[NX*2*MAXDIM+1];
int i,j,t;
start = SEED;
/*--------------------------------------------------------------------
c Jump to the starting element for our first plane.
c-------------------------------------------------------------------*/
ipow46(A, (zstart[0]-1)*2*NX*NY + (ystart[0]-1)*2*NX, &an);
dummy = randlc(&start, an);
/* an = A^(2*NX*NY): the per-plane jump in the random sequence */
ipow46(A, 2*NX*NY, &an);
/*--------------------------------------------------------------------
c Go through by z planes filling in one square at a time.
c-------------------------------------------------------------------*/
for (k = 0; k < dims[0][2]; k++) {
x0 = start;
vranlc(2*NX*dims[0][1], &x0, A, tmp);
t = 1;
for (j = 0; j < dims[0][1]; j++)
for (i = 0; i < NX; i++) {
u0[k][j][i].real = tmp[t++];
u0[k][j][i].imag = tmp[t++];
}
/* NOTE(review): k != dims[0][2] is always true here (k < dims[0][2]);
this matches the NPB 2.3-serial source, and changing it would alter
the random stream, so it is kept as-is */
if (k != dims[0][2]) dummy = randlc(&start, an);
}
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c compute a^exponent mod 2^46 by binary exponentiation; each squaring
c or multiply step is one call to randlc, so the call sequence (and
c hence the arithmetic mod 2^46) is identical to the original.
c-------------------------------------------------------------------*/
static void ipow46(double a, int exponent, double *result) {
    double base, acc, junk;
    int remaining;
    *result = 1;
    if (exponent == 0) return;
    base = a;        /* running square: a^(2^k) */
    acc = 1;         /* accumulated product of odd-step factors */
    for (remaining = exponent; remaining > 1; ) {
        if (remaining % 2 == 0) {
            junk = randlc(&base, base);   /* base = base^2 mod 2^46 */
            remaining /= 2;
        } else {
            junk = randlc(&acc, base);    /* acc = acc*base mod 2^46 */
            remaining -= 1;
        }
    }
    /* fold in the final factor of base */
    junk = randlc(&acc, base);
    *result = acc;
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Print the benchmark banner and initialize the global problem layout:
c iteration count, per-dimension sizes (dims), and the [1..N] start/end
c index ranges for each of the three decomposition views.
c (Removed unused locals ierr, j, and fstatus from the original.)
c-------------------------------------------------------------------*/
static void setup(void) {
    int i;
    printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version"
" - FT Benchmark\n\n");
    niter = NITER_DEFAULT;
    printf(" Size : %3dx%3dx%3d\n", NX, NY, NZ);
    printf(" Iterations : %7d\n", niter);
/* 1004 format(' Number of processes : ', i7)
1005 format(' Processor array : ', i3, 'x', i3)
1006 format(' WARNING: compiled for ', i5, ' processes. ',
> ' Will not verify. ')*/
    /* every layout view covers the full NX x NY x NZ grid */
    for (i = 0;i < 3 ; i++) {
        dims[i][0] = NX;
        dims[i][1] = NY;
        dims[i][2] = NZ;
    }
    /* 1-based inclusive index ranges, kept from the Fortran original */
    for (i = 0; i < 3; i++) {
        xstart[i] = 1;
        xend[i] = NX;
        ystart[i] = 1;
        yend[i] = NY;
        zstart[i] = 1;
        zend[i] = NZ;
    }
/*--------------------------------------------------------------------
c Set up info for blocking of ffts and transposes. This improves
c performance on cache-based systems. Blocking involves
c working on a chunk of the problem at a time, taking chunks
c along the first, second, or third dimension.
c
c - In cffts1 blocking is on 2nd dimension (with fft on 1st dim)
c - In cffts2/3 blocking is on 1st dimension (with fft on 2nd and 3rd dims)
c Since 1st dim is always in processor, we'll assume it's long enough
c (default blocking factor is 16 so min size for 1st dim is 16)
c The only case we have to worry about is cffts1 in a 2d decomposition.
c so the blocking factor should not be larger than the 2nd dimension.
c-------------------------------------------------------------------*/
    fftblock = FFTBLOCK_DEFAULT;
    fftblockpad = FFTBLOCKPAD_DEFAULT;
    /* the condition below is never true here (fftblock was just set to
       FFTBLOCK_DEFAULT); retained for fidelity with the NPB original,
       where fftblock could be reduced for small decompositions */
    if (fftblock != FFTBLOCK_DEFAULT) fftblockpad = fftblock+3;
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void compute_indexmap(int indexmap[NZ][NY][NX], int d[3]) {
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c compute function from local (i,j,k) to ibar^2+jbar^2+kbar^2
c for time evolution exponent.
c-------------------------------------------------------------------*/
int i, j, k, ii, ii2, jj, ij2, kk;
double ap;
/*--------------------------------------------------------------------
c basically we want to convert the fortran indices
c 1 2 3 4 5 6 7 8
c to
c 0 1 2 3 -4 -3 -2 -1
c The following magic formula does the trick:
c mod(i-1+n/2, n) - n/2
c-------------------------------------------------------------------*/
#pragma omp parallel for default(shared) private(i,j,k,ii,ii2,jj,ij2,kk)
for (i = 0; i < dims[2][0]; i++) {
ii = (i+1+xstart[2]-2+NX/2)%NX - NX/2;
ii2 = ii*ii;
for (j = 0; j < dims[2][1]; j++) {
jj = (j+1+ystart[2]-2+NY/2)%NY - NY/2;
ij2 = jj*jj+ii2;
for (k = 0; k < dims[2][2]; k++) {
kk = (k+1+zstart[2]-2+NZ/2)%NZ - NZ/2;
/* i varies slowest in the loop nest but fastest in memory; the
write below is therefore strided -- kept as in the original */
indexmap[k][j][i] = kk*kk+ij2;
}
}
}
/*--------------------------------------------------------------------
c compute array of exponentials for time evolution.
c-------------------------------------------------------------------*/
ap = - 4.0 * ALPHA * PI * PI;
ex[0] = 1.0;
ex[1] = exp(ap);
/* ex[i] = exp(ap)^i, built incrementally */
for (i = 2; i <= EXPMAX; i++) {
ex[i] = ex[i-1]*ex[1];
}
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Print every non-zero section timer with its label.
c Fix: the original format string "timer %2d(%16s( :%10.6f" had an
c unbalanced '(' typo; the label is now closed with ')'.
c-------------------------------------------------------------------*/
static void print_timers(void) {
    int i;
    char *tstrings[] = { " total ",
" setup ",
" fft ",
" evolve ",
" checksum ",
" fftlow ",
" fftcopy " };
    for (i = 0; i < T_MAX; i++) {
        if (timer_read(i) != 0.0) {
            printf("timer %2d(%16s) :%10.6f\n", i, tstrings[i], timer_read(i));
        }
    }
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void fft(int dir, dcomplex x1[NZ][NY][NX], dcomplex x2[NZ][NY][NX]) {
/*--------------------------------------------------------------------
c 3-D FFT: dir == 1 is the forward transform, anything else inverse.
c-------------------------------------------------------------------*/
/* NOTE(review): these scratch arrays are passed to cffts1/2/3 but each
of those functions shadows them with per-thread locals inside its
parallel region, so these stack arrays are effectively unused */
dcomplex y0[NX][FFTBLOCKPAD];
dcomplex y1[NX][FFTBLOCKPAD];
/*--------------------------------------------------------------------
c note: args x1, x2 must be different arrays
c note: args for cfftsx are (direction, layout, xin, xout, scratch)
c xin/xout may be the same and it can be somewhat faster
c if they are
c-------------------------------------------------------------------*/
if (dir == 1) {
cffts1(1, dims[0], x1, x1, y0, y1); /* x1 -> x1 */
cffts2(1, dims[1], x1, x1, y0, y1); /* x1 -> x1 */
cffts3(1, dims[2], x1, x2, y0, y1); /* x1 -> x2 */
} else {
cffts3(-1, dims[2], x1, x1, y0, y1); /* x1 -> x1 */
cffts2(-1, dims[1], x1, x1, y0, y1); /* x1 -> x1 */
cffts1(-1, dims[0], x1, x2, y0, y1); /* x1 -> x2 */
}
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void cffts1(int is, int d[3], dcomplex x[NZ][NY][NX],
dcomplex xout[NZ][NY][NX],
dcomplex y0[NX][FFTBLOCKPAD],
dcomplex y1[NX][FFTBLOCKPAD]) {
/*--------------------------------------------------------------------
c 1-D FFTs along the first (x) dimension, in blocks of fftblock rows
c of the second dimension: copy block in, cfftz, copy block out.
c Assumes d[1] is a multiple of fftblock (true for NPB's power-of-two
c sizes); any remainder would be skipped by the jj loop bound.
c-------------------------------------------------------------------*/
int logd[3];
int i, j, k, jj;
for (i = 0; i < 3; i++) {
logd[i] = ilog2(d[i]);
}
#pragma omp parallel default(shared) private(i,j,k,jj) shared(is)
{
/* per-thread scratch: these shadow the y0/y1 parameters, which are
therefore unused inside this function */
dcomplex y0[NX][FFTBLOCKPAD];
dcomplex y1[NX][FFTBLOCKPAD];
#pragma omp for
for (k = 0; k < d[2]; k++) {
for (jj = 0; jj <= d[1] - fftblock; jj+=fftblock) {
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
/* gather a block into y0, transposed so the FFT dim is contiguous */
for (j = 0; j < fftblock; j++) {
for (i = 0; i < d[0]; i++) {
y0[i][j].real = x[k][j+jj][i].real;
y0[i][j].imag = x[k][j+jj][i].imag;
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */
cfftz (is, logd[0],
d[0], y0, y1);
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
/* scatter the transformed block back */
for (j = 0; j < fftblock; j++) {
for (i = 0; i < d[0]; i++) {
xout[k][j+jj][i].real = y0[i][j].real;
xout[k][j+jj][i].imag = y0[i][j].imag;
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
}
}
}
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void cffts2(int is, int d[3], dcomplex x[NZ][NY][NX],
dcomplex xout[NZ][NY][NX],
dcomplex y0[NX][FFTBLOCKPAD],
dcomplex y1[NX][FFTBLOCKPAD]) {
/*--------------------------------------------------------------------
c 1-D FFTs along the second (y) dimension, blocked over the first
c dimension in chunks of fftblock columns.  The y0/y1 parameters are
c shadowed by per-thread copies inside the parallel region.
c-------------------------------------------------------------------*/
int logd[3];
int i, j, k, ii;
for (i = 0; i < 3; i++) {
logd[i] = ilog2(d[i]);
}
#pragma omp parallel default(shared) private(i,j,k,ii) shared(is)
{
/* per-thread scratch, shadows the parameters of the same name */
dcomplex y0[NX][FFTBLOCKPAD];
dcomplex y1[NX][FFTBLOCKPAD];
#pragma omp for
for (k = 0; k < d[2]; k++) {
for (ii = 0; ii <= d[0] - fftblock; ii+=fftblock) {
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
for (j = 0; j < d[1]; j++) {
for (i = 0; i < fftblock; i++) {
y0[j][i].real = x[k][j][i+ii].real;
y0[j][i].imag = x[k][j][i+ii].imag;
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */
cfftz (is, logd[1],
d[1], y0, y1);
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
for (j = 0; j < d[1]; j++) {
for (i = 0; i < fftblock; i++) {
xout[k][j][i+ii].real = y0[j][i].real;
xout[k][j][i+ii].imag = y0[j][i].imag;
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
}
}
}
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void cffts3(int is, int d[3], dcomplex x[NZ][NY][NX],
dcomplex xout[NZ][NY][NX],
dcomplex y0[NX][FFTBLOCKPAD],
dcomplex y1[NX][FFTBLOCKPAD]) {
/*--------------------------------------------------------------------
c 1-D FFTs along the third (z) dimension, blocked over the first
c dimension in chunks of fftblock columns.  The y0/y1 parameters are
c shadowed by per-thread copies inside the parallel region.
c-------------------------------------------------------------------*/
int logd[3];
int i, j, k, ii;
for (i = 0;i < 3; i++) {
logd[i] = ilog2(d[i]);
}
#pragma omp parallel default(shared) private(i,j,k,ii) shared(is)
{
/* per-thread scratch, shadows the parameters of the same name */
dcomplex y0[NX][FFTBLOCKPAD];
dcomplex y1[NX][FFTBLOCKPAD];
#pragma omp for
for (j = 0; j < d[1]; j++) {
for (ii = 0; ii <= d[0] - fftblock; ii+=fftblock) {
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
for (k = 0; k < d[2]; k++) {
for (i = 0; i < fftblock; i++) {
y0[k][i].real = x[k][j][i+ii].real;
y0[k][i].imag = x[k][j][i+ii].imag;
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */
cfftz (is, logd[2],
d[2], y0, y1);
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
for (k = 0; k < d[2]; k++) {
for (i = 0; i < fftblock; i++) {
xout[k][j][i+ii].real = y0[k][i].real;
xout[k][j][i+ii].imag = y0[k][i].imag;
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
}
}
}
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c fft_init: compute the roots-of-unity array u used by all subsequent
c FFTs.  u[0].real stores m = log2(n) (read back by cfftz as the maximum
c admissible transform size); the twiddle factors for FFT level j are
c stored contiguously starting at u[ku], which permits stride-one access
c during each FFT iteration.
c (The unused local `nu` from the original Fortran translation was
c removed.)
c-------------------------------------------------------------------*/
static void fft_init (int n) {
    int m,ku,i,j,ln;
    double t, ti;

    m = ilog2(n);
    u[0].real = (double)m;      /* cfftz reads this as the max level */
    u[0].imag = 0.0;
    ku = 1;                     /* start of level-j twiddle block */
    ln = 1;                     /* number of factors at level j */

    for (j = 1; j <= m; j++) {
	t = PI / ln;
	for (i = 0; i <= ln - 1; i++) {
	    ti = i * t;
	    u[i+ku].real = cos(ti);
	    u[i+ku].imag = sin(ti);
	}
	ku = ku + ln;
	ln = 2 * ln;
    }
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void cfftz (int is, int m, int n, dcomplex x[NX][FFTBLOCKPAD],
		   dcomplex y[NX][FFTBLOCKPAD]) {
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c Computes NY N-point complex-to-complex FFTs of X using an algorithm due
c to Swarztrauber.  X is both the input and the output array, while Y is a
c scratch array.  It is assumed that N = 2^M.  Before calling CFFTZ to
c perform FFTs, the array U must be initialized by calling CFFTZ with IS
c set to 0 and M set to MX, where MX is the maximum value of M for any
c subsequent call.
c-------------------------------------------------------------------*/

    int i,j,l,mx;

/*--------------------------------------------------------------------
c Check if input parameters are invalid.
c-------------------------------------------------------------------*/
    /* u[0].real holds log2 of the largest transform fft_init prepared */
    mx = (int)(u[0].real);
    if ((is != 1 && is != -1) || m < 1 || m > mx) {
	printf("CFFTZ: Either U has not been initialized, or else\n"
	       "one of the input parameters is invalid%5d%5d%5d\n",
	       is, m, mx);
	exit(1);
    }

/*--------------------------------------------------------------------
c Perform one variant of the Stockham FFT.
c-------------------------------------------------------------------*/
    /* Two butterfly levels per trip, ping-ponging between x and y so the
       data usually ends up back in x without an extra copy. */
    for (l = 1; l <= m; l+=2) {
	fftz2 (is, l, m, n, fftblock, fftblockpad, u, x, y);
	if (l == m) break;
	fftz2 (is, l + 1, m, n, fftblock, fftblockpad, u, y, x);
    }

/*--------------------------------------------------------------------
c Copy Y to X.
c-------------------------------------------------------------------*/
    /* Odd m leaves the final result in the scratch array; copy it back. */
    if (m % 2 == 1) {
	for (j = 0; j < n; j++) {
	    for (i = 0; i < fftblock; i++) {
		x[j][i].real = y[j][i].real;
		x[j][i].imag = y[j][i].imag;
	    }
	}
    }
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c fftz2: performs the L-th iteration of the second variant of the
c Stockham FFT, transforming ny pencils at once (x -> y).
c   is  : +1 forward / -1 inverse (conjugates the twiddle factor)
c   l   : butterfly level, 1..m
c   m   : log2(n)
c   n   : transform length
c   ny  : number of pencils processed together (fftblock)
c   ny1 : padded pencil stride (kept for interface compatibility; the
c         padding is already baked into the FFTBLOCKPAD array type)
c (Unused locals x11/x21 from the original translation were removed.)
c-------------------------------------------------------------------*/
static void fftz2 (int is, int l, int m, int n, int ny, int ny1,
		   dcomplex u[NX], dcomplex x[NX][FFTBLOCKPAD],
		   dcomplex y[NX][FFTBLOCKPAD]) {
    int k,n1,li,lj,lk,ku,i,j,i11,i12,i21,i22;
    dcomplex u1;

/*--------------------------------------------------------------------
c Set initial parameters.
c-------------------------------------------------------------------*/
    n1 = n / 2;
    /* lk = 2^(l-1): butterfly span at this level */
    if (l-1 == 0) {
	lk = 1;
    } else {
	lk = 2 << ((l - 1)-1);
    }
    /* li = 2^(m-l): number of butterfly groups */
    if (m-l == 0) {
	li = 1;
    } else {
	li = 2 << ((m - l)-1);
    }
    lj = 2 * lk;
    ku = li;           /* twiddle factors for this level start at u[li] */

    for (i = 0; i < li; i++) {
	i11 = i * lk;          /* first input half  */
	i12 = i11 + n1;        /* second input half */
	i21 = i * lj;          /* even output block */
	i22 = i21 + lk;        /* odd output block  */

	/* inverse transform conjugates the twiddle factor */
	if (is >= 1) {
	    u1.real = u[ku+i].real;
	    u1.imag = u[ku+i].imag;
	} else {
	    u1.real = u[ku+i].real;
	    u1.imag = -u[ku+i].imag;
	}

/*--------------------------------------------------------------------
c This loop is vectorizable.
c-------------------------------------------------------------------*/
	for (k = 0; k < lk; k++) {
	    for (j = 0; j < ny; j++) {
		double x11real, x11imag;
		double x21real, x21imag;
		x11real = x[i11+k][j].real;
		x11imag = x[i11+k][j].imag;
		x21real = x[i12+k][j].real;
		x21imag = x[i12+k][j].imag;
		/* butterfly: sum goes to the even block, the twiddled
		   difference to the odd block */
		y[i21+k][j].real = x11real + x21real;
		y[i21+k][j].imag = x11imag + x21imag;
		y[i22+k][j].real = u1.real * (x11real - x21real)
		    - u1.imag * (x11imag - x21imag);
		y[i22+k][j].imag = u1.real * (x11imag - x21imag)
		    + u1.imag * (x11real - x21real);
	    }
	}
    }
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/* ilog2: ceiling of log2(n) for n >= 1 (returns 0 for n == 1).
 * For the power-of-two sizes used by this benchmark this is exactly
 * log2(n). */
static int ilog2(int n) {
    int lg2;
    int pow2;

    if (n == 1) return 0;
    /* grow pow2 = 2^lg2 until it reaches or passes n */
    for (lg2 = 1, pow2 = 2; pow2 < n; lg2++) {
	pow2 <<= 1;
    }
    return lg2;
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/* checksum: accumulate the benchmark's verification checksum for time step
 * i into the global sums[i].  Samples u1 at 1024 pseudo-random (modular)
 * grid points; with this single-process layout the x/y/z start/end ranges
 * cover the whole grid, so every sample is local.  Each thread sums its
 * share privately, merges under a critical section, then one thread
 * normalizes by NTOTAL and prints the result. */
static void checksum(int i, dcomplex u1[NZ][NY][NX], int d[3]) {
#pragma omp parallel default(shared)
  {
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
    int j, q,r,s, ierr;
    dcomplex chk,allchk;
    chk.real = 0.0;
    chk.imag = 0.0;

#pragma omp for nowait
    for (j = 1; j <= 1024; j++) {
	/* 1-based modular sample coordinates */
	q = j%NX+1;
	if (q >= xstart[0] && q <= xend[0]) {
	    r = (3*j)%NY+1;
	    if (r >= ystart[0] && r <= yend[0]) {
		s = (5*j)%NZ+1;
		if (s >= zstart[0] && s <= zend[0]) {
		    cadd(chk,chk,u1[s-zstart[0]][r-ystart[0]][q-xstart[0]]);
		}
	    }
	}
    }

    /* merge the per-thread partial sums */
#pragma omp critical
    {
	sums[i].real += chk.real;
	sums[i].imag += chk.imag;
    }
#pragma omp barrier
#pragma omp single
    {
	/* complex % real */
	sums[i].real = sums[i].real/(double)(NTOTAL);
	sums[i].imag = sums[i].imag/(double)(NTOTAL);

	printf("T = %5d     Checksum = %22.12e %22.12e\n",
	       i, sums[i].real, sums[i].imag);
    }
  }
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void verify (int d1, int d2, int d3, int nt,
boolean *verified, char *class) {
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
int ierr, size, i;
double err, epsilon;
/*--------------------------------------------------------------------
c Sample size reference checksums
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Class S size reference checksums
c-------------------------------------------------------------------*/
double vdata_real_s[6+1] = { 0.0,
5.546087004964e+02,
5.546385409189e+02,
5.546148406171e+02,
5.545423607415e+02,
5.544255039624e+02,
5.542683411902e+02 };
double vdata_imag_s[6+1] = { 0.0,
4.845363331978e+02,
4.865304269511e+02,
4.883910722336e+02,
4.901273169046e+02,
4.917475857993e+02,
4.932597244941e+02 };
/*--------------------------------------------------------------------
c Class W size reference checksums
c-------------------------------------------------------------------*/
double vdata_real_w[6+1] = { 0.0,
5.673612178944e+02,
5.631436885271e+02,
5.594024089970e+02,
5.560698047020e+02,
5.530898991250e+02,
5.504159734538e+02 };
double vdata_imag_w[6+1] = { 0.0,
5.293246849175e+02,
5.282149986629e+02,
5.270996558037e+02,
5.260027904925e+02,
5.249400845633e+02,
5.239212247086e+02 };
/*--------------------------------------------------------------------
c Class A size reference checksums
c-------------------------------------------------------------------*/
double vdata_real_a[6+1] = { 0.0,
5.046735008193e+02,
5.059412319734e+02,
5.069376896287e+02,
5.077892868474e+02,
5.085233095391e+02,
5.091487099959e+02 };
double vdata_imag_a[6+1] = { 0.0,
5.114047905510e+02,
5.098809666433e+02,
5.098144042213e+02,
5.101336130759e+02,
5.104914655194e+02,
5.107917842803e+02 };
/*--------------------------------------------------------------------
c Class B size reference checksums
c-------------------------------------------------------------------*/
double vdata_real_b[20+1] = { 0.0,
5.177643571579e+02,
5.154521291263e+02,
5.146409228649e+02,
5.142378756213e+02,
5.139626667737e+02,
5.137423460082e+02,
5.135547056878e+02,
5.133910925466e+02,
5.132470705390e+02,
5.131197729984e+02,
5.130070319283e+02,
5.129070537032e+02,
5.128182883502e+02,
5.127393733383e+02,
5.126691062020e+02,
5.126064276004e+02,
5.125504076570e+02,
5.125002331720e+02,
5.124551951846e+02,
5.124146770029e+02 };
double vdata_imag_b[20+1] = { 0.0,
5.077803458597e+02,
5.088249431599e+02,
5.096208912659e+02,
5.101023387619e+02,
5.103976610617e+02,
5.105948019802e+02,
5.107404165783e+02,
5.108576573661e+02,
5.109577278523e+02,
5.110460304483e+02,
5.111252433800e+02,
5.111968077718e+02,
5.112616233064e+02,
5.113203605551e+02,
5.113735928093e+02,
5.114218460548e+02,
5.114656139760e+02,
5.115053595966e+02,
5.115415130407e+02,
5.115744692211e+02 };
/*--------------------------------------------------------------------
c Class C size reference checksums
c-------------------------------------------------------------------*/
double vdata_real_c[20+1] = { 0.0,
5.195078707457e+02,
5.155422171134e+02,
5.144678022222e+02,
5.140150594328e+02,
5.137550426810e+02,
5.135811056728e+02,
5.134569343165e+02,
5.133651975661e+02,
5.132955192805e+02,
5.132410471738e+02,
5.131971141679e+02,
5.131605205716e+02,
5.131290734194e+02,
5.131012720314e+02,
5.130760908195e+02,
5.130528295923e+02,
5.130310107773e+02,
5.130103090133e+02,
5.129905029333e+02,
5.129714421109e+02 };
double vdata_imag_c[20+1] = { 0.0,
5.149019699238e+02,
5.127578201997e+02,
5.122251847514e+02,
5.121090289018e+02,
5.121143685824e+02,
5.121496764568e+02,
5.121870921893e+02,
5.122193250322e+02,
5.122454735794e+02,
5.122663649603e+02,
5.122830879827e+02,
5.122965869718e+02,
5.123075927445e+02,
5.123166486553e+02,
5.123241541685e+02,
5.123304037599e+02,
5.123356167976e+02,
5.123399592211e+02,
5.123435588985e+02,
5.123465164008e+02 };
epsilon = 1.0e-12;
*verified = TRUE;
*class = 'U';
if (d1 == 64 &&
d2 == 64 &&
d3 == 64 &&
nt == 6) {
*class = 'S';
for (i = 1; i <= nt; i++) {
err = (get_real(sums[i]) - vdata_real_s[i]) / vdata_real_s[i];
if (fabs(err) > epsilon) {
*verified = FALSE;
break;
}
err = (get_imag(sums[i]) - vdata_imag_s[i]) / vdata_imag_s[i];
if (fabs(err) > epsilon) {
*verified = FALSE;
break;
}
}
} else if (d1 == 128 &&
d2 == 128 &&
d3 == 32 &&
nt == 6) {
*class = 'W';
for (i = 1; i <= nt; i++) {
err = (get_real(sums[i]) - vdata_real_w[i]) / vdata_real_w[i];
if (fabs(err) > epsilon) {
*verified = FALSE;
break;
}
err = (get_imag(sums[i]) - vdata_imag_w[i]) / vdata_imag_w[i];
if (fabs(err) > epsilon) {
*verified = FALSE;
break;
}
}
} else if (d1 == 256 &&
d2 == 256 &&
d3 == 128 &&
nt == 6) {
*class = 'A';
for (i = 1; i <= nt; i++) {
err = (get_real(sums[i]) - vdata_real_a[i]) / vdata_real_a[i];
if (fabs(err) > epsilon) {
*verified = FALSE;
break;
}
err = (get_imag(sums[i]) - vdata_imag_a[i]) / vdata_imag_a[i];
if (fabs(err) > epsilon) {
*verified = FALSE;
break;
}
}
} else if (d1 == 512 &&
d2 == 256 &&
d3 == 256 &&
nt == 20) {
*class = 'B';
for (i = 1; i <= nt; i++) {
err = (get_real(sums[i]) - vdata_real_b[i]) / vdata_real_b[i];
if (fabs(err) > epsilon) {
*verified = FALSE;
break;
}
err = (get_imag(sums[i]) - vdata_imag_b[i]) / vdata_imag_b[i];
if (fabs(err) > epsilon) {
*verified = FALSE;
break;
}
}
} else if (d1 == 512 &&
d2 == 512 &&
d3 == 512 &&
nt == 20) {
*class = 'C';
for (i = 1; i <= nt; i++) {
err = (get_real(sums[i]) - vdata_real_c[i]) / vdata_real_c[i];
if (fabs(err) > epsilon) {
*verified = FALSE;
break;
}
err = (get_imag(sums[i]) - vdata_imag_c[i]) / vdata_imag_c[i];
if (fabs(err) > epsilon) {
*verified = FALSE;
break;
}
}
}
if (*class != 'U') {
printf("Result verification successful\n");
} else {
printf("Result verification failed\n");
}
printf("class = %1c\n", *class);
}
|
sink-3.c | /* { dg-do compile } */
/* { dg-options "-fopenmp" } */
/* Test that we can handle multiple undeclared sink variables
gracefully. */
void bar (int *);
/* foo: compile-only driver whose depend(sink:...) clause names two
   undeclared variables (poo, paa).  The dg-error directives assert the
   expected diagnostics; the second one (".-1") is anchored to the pragma
   line directly above it, so no lines may be inserted inside the loop. */
void
foo ()
{
  int i,j;
#pragma omp parallel for ordered(1)
  for (i=0; i < 100; ++i)
    {
#pragma omp ordered depend(sink:poo-1,paa+1) /* { dg-error "poo.*declared.*paa.*declared" } */
      bar(&i); /* { dg-error "may not be closely nested" "" { target *-*-* } .-1 } */
#pragma omp ordered depend(source)
    }
}
|
fc_kernel_int8_arm.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Author: 1091545398@qq.com
*/
#include <stdint.h>
#include <stdlib.h>
#include <math.h>
#include <arm_neon.h>
#include "fc_kernel_int8_arm.h"
/* gemv_1x8_int8: int8 GEMV producing 8 output channels at once.
 * inp      : kernel_size int8 inputs
 * kernel   : weights interleaved 8-wise (8 channel values per input index)
 * biases   : optional 8 per-channel int32 biases (may be NULL)
 * scales   : 8 per-channel requantization scales
 * output   : 8 int8 results, clamped to [-127, 127]
 * Fix vs original: the requantization used to add +0.5 unconditionally
 * before the truncating int conversion, which mis-rounded negative sums
 * (e.g. -2.6 -> -2).  The offset is now +/-0.5 by sign, matching the
 * scalar round() used by gemv_1x2_int8. */
void gemv_1x8_int8(int32_t *biases, const float *scales, int8_t *inp, int8_t *kernel, long kernel_size,
                   int8_t *output) {
    int8x8_t input;
    int8x16_t weight_0_1, weight_2_3, weight_4_5, weight_6_7;
    int16x8_t weight0_16, weight1_16, weight2_16, weight3_16;
    int16x8_t weight4_16, weight5_16, weight6_16, weight7_16;
    int32x4_t res = {0, 0, 0, 0};    /* channels 0..3 accumulator */
    int32x4_t res1 = {0, 0, 0, 0};   /* channels 4..7 accumulator */
    int8_t *input_ptr = inp;
    int8_t *weight_ptr = kernel;
    int remainw = (kernel_size >> 3) << 3;   /* multiple of 8 <= kernel_size */

    /* main loop: 8 inputs x 8 channels per iteration */
    for (int i = 0; i < remainw; i = i + 8)
    {
        input = vld1_s8(input_ptr);
        weight_0_1 = vld1q_s8(weight_ptr);
        weight_2_3 = vld1q_s8(weight_ptr + 16);
        weight_4_5 = vld1q_s8(weight_ptr + 32);
        weight_6_7 = vld1q_s8(weight_ptr + 48);
        weight0_16 = vmull_s8(vdup_n_s8(vget_lane_s8(input, 0)), vget_low_s8(weight_0_1));
        weight1_16 = vmull_s8(vdup_n_s8(vget_lane_s8(input, 1)), vget_high_s8(weight_0_1));
        weight2_16 = vmull_s8(vdup_n_s8(vget_lane_s8(input, 2)), vget_low_s8(weight_2_3));
        weight3_16 = vmull_s8(vdup_n_s8(vget_lane_s8(input, 3)), vget_high_s8(weight_2_3));
        weight4_16 = vmull_s8(vdup_n_s8(vget_lane_s8(input, 4)), vget_low_s8(weight_4_5));
        weight5_16 = vmull_s8(vdup_n_s8(vget_lane_s8(input, 5)), vget_high_s8(weight_4_5));
        weight6_16 = vmull_s8(vdup_n_s8(vget_lane_s8(input, 6)), vget_low_s8(weight_6_7));
        weight7_16 = vmull_s8(vdup_n_s8(vget_lane_s8(input, 7)), vget_high_s8(weight_6_7));
        res = vaddq_s32(res, vaddl_s16(vget_low_s16(weight0_16), vget_low_s16(weight1_16)));
        res = vaddq_s32(res, vaddl_s16(vget_low_s16(weight2_16), vget_low_s16(weight3_16)));
        res = vaddq_s32(res, vaddl_s16(vget_low_s16(weight4_16), vget_low_s16(weight5_16)));
        res = vaddq_s32(res, vaddl_s16(vget_low_s16(weight6_16), vget_low_s16(weight7_16)));
        res1 = vaddq_s32(res1, vaddl_s16(vget_high_s16(weight0_16), vget_high_s16(weight1_16)));
        res1 = vaddq_s32(res1, vaddl_s16(vget_high_s16(weight2_16), vget_high_s16(weight3_16)));
        res1 = vaddq_s32(res1, vaddl_s16(vget_high_s16(weight4_16), vget_high_s16(weight5_16)));
        res1 = vaddq_s32(res1, vaddl_s16(vget_high_s16(weight6_16), vget_high_s16(weight7_16)));
        input_ptr += 8;
        weight_ptr += 64;
    }
    /* tail: one input x 8 channels per iteration */
    for (int i = remainw; i < kernel_size; ++i)
    {
        weight0_16 = vmull_s8(vdup_n_s8(input_ptr[0]), vld1_s8(weight_ptr));
        res = vaddq_s32(vmovl_s16(vget_low_s16(weight0_16)), res);
        res1 = vaddq_s32(vmovl_s16(vget_high_s16(weight0_16)), res1);
        input_ptr += 1;
        weight_ptr += 8;
    }
    if (biases)
    {
        int32x4_t bias = vld1q_s32(biases);
        int32x4_t bias1 = vld1q_s32(biases + 4);
        res = vaddq_s32(res,bias);
        res1 = vaddq_s32(res1,bias1);
    }
    /* requantize: scale, round half away from zero, clamp to int8 */
    float32x4_t res_f = vcvtq_f32_s32(res);
    float32x4_t res1_f = vcvtq_f32_s32(res1);
    float32x4_t scale = vld1q_f32(scales);
    float32x4_t scale_1 = vld1q_f32(scales + 4);
    res_f = vmulq_f32(res_f, scale);
    res1_f = vmulq_f32(res1_f, scale_1);
    /* sign-aware rounding offset (vcvtq truncates toward zero) */
    float32x4_t zero = vdupq_n_f32(0.0f);
    float32x4_t pos_half = vdupq_n_f32(0.5f);
    float32x4_t neg_half = vdupq_n_f32(-0.5f);
    res_f = vaddq_f32(res_f, vbslq_f32(vcltq_f32(res_f, zero), neg_half, pos_half));
    res1_f = vaddq_f32(res1_f, vbslq_f32(vcltq_f32(res1_f, zero), neg_half, pos_half));
    res = vcvtq_s32_f32(res_f);
    res1 = vcvtq_s32_f32(res1_f);
    int16x4_t res_16 = vmovn_s32(res);
    int16x4_t res1_16 = vmovn_s32(res1);
    int8x8_t result = vmovn_s16(vcombine_s16(res_16, res1_16));
    int8x8_t _m127 = vdup_n_s8(127);
    int8x8_t _m_127 = vdup_n_s8(-127);
    result = vmax_s8(_m_127, result);
    result = vmin_s8(_m127, result);
    vst1_s8(output, result);
}
/* gemv_1x2_int8: int8 GEMV producing 2 output channels.
 * kernel holds weights interleaved 2-wise (2 channel values per input
 * index).  Results are requantized with scales[0..1] and clamped to
 * [-127, 127].
 * Fixes vs original:
 *  - remainw was computed as (kernel_size << 3) >> 3, i.e. kernel_size
 *    itself, so the 8-wide vector loop read past the end of the input
 *    and weight buffers whenever kernel_size was not a multiple of 8;
 *  - the negative clamp of channel 1 tested data_i32_0 instead of
 *    data_i32_1. */
void gemv_1x2_int8(const int32_t *biases, const float *scales, int8_t *inp, int8_t *kernel, long kernel_size,
                   int8_t *output)
{
    int8_t *input_ptr = inp;
    int8_t *weight_ptr = kernel;
    int remainw = (kernel_size >> 3) << 3;   /* multiple of 8 <= kernel_size */
    int8x8x2_t weight;
    int8x8_t input;
    int16x8_t out_16_0, out_16_1;
    int32x4_t out_32_0, out_32_1;
    int32_t sum0 = 0, sum1 = 0;

    /* vector loop: 8 inputs per iteration, de-interleaving the weights */
    for (int i = 0; i < remainw; i = i + 8)
    {
        weight = vld2_s8(weight_ptr);
        input = vld1_s8(input_ptr);
        out_16_0 = vmull_s8(weight.val[0], input);
        out_16_1 = vmull_s8(weight.val[1], input);
        out_32_0 = vpaddlq_s16(out_16_0);
        out_32_1 = vpaddlq_s16(out_16_1);
        sum0 += vgetq_lane_s32(out_32_0, 0) + vgetq_lane_s32(out_32_0, 1) +
                vgetq_lane_s32(out_32_0, 2) + vgetq_lane_s32(out_32_0, 3);
        sum1 += vgetq_lane_s32(out_32_1, 0) + vgetq_lane_s32(out_32_1, 1) +
                vgetq_lane_s32(out_32_1, 2) + vgetq_lane_s32(out_32_1, 3);
        weight_ptr += 16;
        input_ptr += 8;
    }
    /* scalar tail */
    for (int i = remainw; i < kernel_size; ++i)
    {
        sum0 += weight_ptr[0] * input_ptr[0];
        sum1 += weight_ptr[1] * input_ptr[0];
        input_ptr++;
        weight_ptr += 2;
    }
    if (biases)
    {
        sum0 += biases[0];
        sum1 += biases[1];
    }
    /* requantize and clamp each channel to [-127, 127] */
    int data_i32_0 = round(sum0 * scales[0]);
    if (data_i32_0 > 127)
        data_i32_0 = 127;
    else if (data_i32_0 < -127)
        data_i32_0 = -127;
    int data_i32_1 = round(sum1 * scales[1]);
    if (data_i32_1 > 127)
        data_i32_1 = 127;
    else if (data_i32_1 < -127)   /* original mistakenly tested data_i32_0 */
        data_i32_1 = -127;
    output[0] = data_i32_0;
    output[1] = data_i32_1;
}
/* gemv1x8: run the 8-channel int8 GEMV kernel over the channel range
 * [start_channel, end_channel).  Both bounds must be multiples of 8.
 * num_thread / cpu_affinity are accepted for interface compatibility but
 * currently unused (the parallel loop is disabled). */
void gemv1x8(const int8_t *input, const int8_t *output, int8_t *weight_interleaved,
             const int32_t *biases, const float *scales,
             int kernel_size, int start_channel, int end_channel, int num_thread,
             int cpu_affinity)
{
    int c;
    for (c = start_channel; c < end_channel; c += 8)
    {
        int8_t *block_weights = (int8_t *) (weight_interleaved + kernel_size * c);
        int8_t *block_out = (int8_t *) (output + c);
        int32_t *block_biases = biases ? (int32_t *) (biases + c) : NULL;
        const float *block_scales = scales + c;
        gemv_1x8_int8(block_biases, block_scales, (int8_t *) input,
                      block_weights, kernel_size, block_out);
    }
}
/* gemv1x2: run the 2-channel int8 GEMV kernel over [start_channel,
 * end_channel); start_channel must be even.  An odd end_channel leaves one
 * trailing channel, computed with a scalar loop.
 * Fix vs original: in the odd-tail branch cur_scales was never assigned,
 * so it was read uninitialized whenever the pair loop executed zero
 * iterations (end_channel == start_channel + 1). */
void gemv1x2(const int8_t *input, int8_t *output, int8_t *weight_interleaved,
             const int32_t *biases, const float *scales,
             int kernel_size,int start_channel,int end_channel,int num_thread,int cpu_affinity)
{
    int32_t sum;
    int ch = 0;
    int8_t *cur_kernel;
    int32_t *cur_biases;
    int8_t *cur_result;
    const float* cur_scales;

    /* channel pairs */
    for (ch = start_channel; ch < (end_channel & -2); ch += 2) {
        cur_kernel = (int8_t *) (weight_interleaved + kernel_size * ch);
        cur_result = (int8_t *) (output + ch);
        cur_biases = biases ? (int32_t *) (biases + ch) : NULL;
        cur_scales = scales + ch;
        gemv_1x2_int8(cur_biases, cur_scales, (int8_t*) input, cur_kernel, kernel_size, cur_result);
    }
    /* odd trailing channel: plain scalar dot product */
    if (end_channel & 0x1) {
        cur_kernel = (int8_t *) (weight_interleaved + kernel_size * ch);
        cur_result = (int8_t *) (output + ch);
        cur_scales = scales + ch;   /* fix: was left over from the loop */
        sum = biases ? *(biases + ch) : 0;
        for (int j = 0; j < kernel_size; j++)
            sum = sum + input[j] * cur_kernel[j];
        int data_i32_0 = round(sum * cur_scales[0]);
        if (data_i32_0 > 127)
            data_i32_0 = 127;
        else if (data_i32_0 < -127)
            data_i32_0 = -127;
        *cur_result = data_i32_0;
    }
}
/* interleave_kernel: repack row-major weights kernel[out_chan][kernel_size]
 * into the layout the GEMV kernels expect: channels are grouped 8 at a
 * time (then 2 at a time, then a lone tail row), and within a group the
 * values for one input index are stored contiguously:
 *   dst[group_width * k + lane] = kernel[(ch + lane) * kernel_size + k]. */
static void interleave_kernel(const int8_t *kernel, int8_t *kernel_interleaved, int out_chan, int kernel_size)
{
    int ch = 0;
    int k, lane;

    /* groups of 8 channels */
    for (; ch + 8 <= out_chan; ch += 8)
    {
        const int8_t *src = kernel + kernel_size * ch;
        int8_t *dst = kernel_interleaved + kernel_size * ch;
        for (k = 0; k < kernel_size; k++)
            for (lane = 0; lane < 8; lane++)
                dst[8 * k + lane] = src[lane * kernel_size + k];
    }
    /* groups of 2 channels */
    for (; ch + 2 <= out_chan; ch += 2)
    {
        const int8_t *src = kernel + kernel_size * ch;
        int8_t *dst = kernel_interleaved + kernel_size * ch;
        for (k = 0; k < kernel_size; k++)
            for (lane = 0; lane < 2; lane++)
                dst[2 * k + lane] = src[lane * kernel_size + k];
    }
    /* lone trailing channel: straight copy */
    if (out_chan & 0x1)
    {
        const int8_t *src = kernel + kernel_size * ch;
        int8_t *dst = kernel_interleaved + kernel_size * ch;
        for (k = 0; k < kernel_size; k++)
            dst[k] = src[k];
    }
}
/* int8_fc_kernel_prerun: one-time setup for the int8 FC kernel.
 * Lazily allocates the interleaved-weight and input scratch buffers
 * (kernel size rounded up to an even length), then repacks the filter
 * weights into the interleaved layout.  Returns 0. */
int int8_fc_kernel_prerun(struct ir_tensor *input_tensor,
                          struct ir_tensor *filter_tensor,
                          struct ir_tensor *output_tensor,
                          struct fc_priv_info *priv_info,
                          struct fc_param *param)
{
    int out_channels = param->num_output;
    int k_size = filter_tensor->dims[1];
    int k_aligned = (k_size + 1) & -2;   /* round up to a multiple of 2 */

    if (!priv_info->interleave_buffer)
    {
        int bytes = out_channels * k_aligned;
        priv_info->interleave_buffer = sys_malloc(bytes);
        priv_info->interleave_buffer_size = bytes;
    }
    if (!priv_info->input_buffer)
    {
        priv_info->input_buffer = sys_malloc(k_aligned);
        priv_info->input_buffer_size = k_aligned;
    }

    interleave_kernel((int8_t *) filter_tensor->data,
                      (int8_t *) priv_info->interleave_buffer,
                      out_channels, k_size);
    return 0;
}
/* int8_fc_kernel_run: execute the int8 fully-connected layer for every
 * row of the input batch.  Per-channel requantization scales are derived
 * as input_scale * weight_scale / output_scale.  Channels are processed
 * 8 at a time, with a 2/1-channel fallback for the remainder.
 * Fix vs original: the requant_scales buffer was malloc'ed but never
 * freed, leaking out_num floats on every call. */
int int8_fc_kernel_run(struct ir_tensor *input_tensor, \
                       struct ir_tensor *filter_tensor, \
                       struct ir_tensor *bias_tensor, \
                       struct ir_tensor *output_tensor, \
                       struct fc_priv_info *priv_info, \
                       struct fc_param *param, \
                       int num_thread, int cpu_affinity) {
    int out_num = param->num_output;
    int kernel_size = filter_tensor->dims[1];
    int8_t *input = (int8_t *) input_tensor->data;
    int8_t *output = (int8_t *) output_tensor->data;
    int8_t *weight = (int8_t *) priv_info->interleave_buffer;
    int32_t *biases = NULL;
    if (bias_tensor)
        biases = (int32_t *) bias_tensor->data;

    /* fold the three quantization scales into one per-channel multiplier */
    float input_scale = input_tensor->scale;
    float output_scale = output_tensor->scale;
    float *weight_scales = filter_tensor->scale_list;
    float *requant_scales = (float *) malloc(out_num * sizeof(float));
    for (int i = 0; i < out_num; i++)
        requant_scales[i] = (input_scale * weight_scales[i]) / output_scale;

    int out_num_8 = out_num & ~7;   /* channels handled by the 8-wide kernel */
    for (int i = 0; i < input_tensor->dims[0]; i++)
    {
        int8_t *cur_input = input + i * kernel_size;
        int8_t *cur_output = output + i * out_num;
        gemv1x8(cur_input, cur_output, weight, biases, requant_scales, kernel_size, 0, out_num_8, num_thread, cpu_affinity);
        if (out_num & 0x7)
            gemv1x2(cur_input, cur_output, weight, biases, requant_scales, kernel_size, out_num_8, out_num, num_thread, cpu_affinity);
    }

    free(requant_scales);   /* fix: was leaked */
    return 0;
}
|
jacobi.c | #include <math.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define U(i,j) (u[i][j])
#define Uold(i,j) (uold[i][j])
#define F(i,j) (f[i][j])
void jacobi (int n, double dx, double omega, double **u, double **f, double tol, int maxit);
/* dalloc_matrix: allocate an n-by-m matrix of doubles as an array of n
 * row pointers; rows are malloc'ed in parallel and left uninitialized. */
double** dalloc_matrix(int n, int m) {
    double **rows = (double**) malloc (n * sizeof(double*));
#pragma omp parallel for
    for (int r = 0; r < n; r++) {
        rows[r] = (double*) malloc (m * sizeof(double));
    }
    return rows;
}
/* dfree_matrix: release a matrix allocated by dalloc_matrix — free each
 * of the n rows (in parallel), then the row-pointer array itself. */
void dfree_matrix(double **mat, int n) {
#pragma omp parallel for
    for (int r = 0; r < n; r++) {
        free(mat[r]);
    }
    free(mat);
}
/* cpu_time: process CPU time in seconds (clock ticks scaled by
 * CLOCKS_PER_SEC). */
double cpu_time() {
    const double ticks = (double) clock();
    return ticks / (double) CLOCKS_PER_SEC;
}
/*****************************************************
* Inicializando os dados
******************************************************/
/*****************************************************
 * initialize: set the grid spacings dx = dy = 1/(n-1), zero the initial
 * guess U and fill the right-hand side F(i,j) = exp(dx*(i-1)) * dy*(j-1).
 ******************************************************/
void initialize(int n,
                double *dx,
                double *dy,
                double **u,
                double **f) {
    int i, j;
    double row_exp, col_y;

    *dx = 1.0 / (n - 1);
    *dy = 1.0 / (n - 1);

    /* rows are independent; each row shares one exp() evaluation */
#pragma omp parallel for default(shared) private(i, j, row_exp, col_y)
    for (i = 0; i < n; i++) {
        row_exp = exp((*dx) * (i - 1));
        for (j = 0; j < n; j++) {
            col_y = (*dy) * (j - 1);
            U(i,j) = 0.0;
            F(i,j) = row_exp * col_y;
        }
    }
}
/************************************************************
* Verificacao do erro entre a solucao numerica e exata
************************************************************/
/************************************************************
 * error_check: RMS-style error between the numerical solution U and the
 * exact solution exp(dx*(i-1)) * exp(-2*dy*(j-1)), printed to stdout.
 * Fix vs original: an OpenMP reduction(+) adds into the variable's
 * incoming value, so `error` must be initialized before the loop — the
 * original read an indeterminate value (undefined behavior).
 ************************************************************/
void error_check(int n,
                 double dx,
                 double dy,
                 double **u) {
    int i, j;
    double xxexp, yy, temp, error;

    error = 0.0;   /* fix: reduction needs a defined starting value */
#pragma omp parallel for reduction(+:error) private(i, j, xxexp, yy, temp)
    for (i = 0; i < n; i++){
        xxexp = exp(dx * (i - 1));
        for (j = 0; j < n; j++){
            yy = dy * (j - 1);
            temp = U(i,j) - xxexp * exp(-2.0 * yy);
            error += temp * temp;
        }
    }
    error = sqrt(error)/(n * n);
    printf("Erro : %g\n", error);
}
/* main: parse <n> <tol> <mits>, allocate the grids, run the SOR-relaxed
 * Jacobi solver, report timing/MFlops and the error versus the exact
 * solution.  Missing arguments fall back to the defaults (with a usage
 * message), as in the original.
 * Fix vs original: m (the column count used for allocation and the
 * MFlops formula) stayed at 100 even when the user passed n != 100, so
 * any n > 100 overflowed the allocated rows. */
int main(int argc, char **argv) {
    int n, m, mits;
    double tol, relax;
    double **u, **f, dx, dy;
    double dt, start, end, mflops;

    /* defaults */
    m = n = 100;
    relax = 1.0;
    tol = 1.0e-5;
    mits = 1000;

    if(argc == 4) {
        n = atoi(argv[1]);
        tol = atof(argv[2]);
        mits = atoi(argv[3]);
    } else {
        printf("Use: ./jacobi <n> <tol> <mits>\n");
        printf("where\n");
        printf("   <n>    : numero de pontos em X e Y (default 100)\n");
        printf("   <tol>  : tolerancia do erro (default 1e-5)\n");
        printf("   <mits> : numero maximo de iteracoes (default 1000)\n");
    }
    m = n;   /* fix: keep the column count in sync with n */
    printf("-> %d, %g, %g, %d\n", n, relax, tol, mits);

    // Alocando as estruturas de dados
    u = dalloc_matrix(n, m);
    f = dalloc_matrix(n, m);

    /* Dados inicializados */
    initialize(n, &dx, &dy, u, f);

    /* optimal SOR relaxation factor for this grid spacing */
    double pi = acos(-1.0);
    relax = 2.0/(1.0 + sin(pi*dx));
    printf("relax parameter: %f \n", relax);

    /* Resolvendo a equação */
    start = cpu_time();
    jacobi(n, dx, relax, u,f, tol, mits);
    end = cpu_time();
    dt = end-start;

    printf(" elapsed time : %12.6f\n", dt);
    /* 13 flops per interior point per iteration */
    mflops = (0.000001*mits*(m-2)*(n-2)*13) / dt;
    printf(" MFlops       : %12.6g (%d, %d, %d, %g)\n",mflops, mits, m, n, dt);

    // Verificando o erro.
    error_check(n, dx, dy, u);

    // Liberando memória.
    dfree_matrix(u, n);
    dfree_matrix(f, n);
    return 0;
}
/*
subroutine jacobi (n,dx,alpha,omega,u,f,tol,maxit)
******************************************************************
* Resolve equação de poisson em um grid retangular assumindo :
* (1) discretização uniforme em cada direção, e
* (2) condiçoes de contorno de Dirichlect
*
* Metodo de jacobi é usado nesta rotina
*
* Input : n número de pontos nas direcoes X/Y
* dx espacamento entre os pontos
* omega fator de relaxação SOR
* f(n,m) vetor RHS
* u(n,m) vetor soluçao
* tol tolerancia do metodo iterativo
* maxit numero máximo de iteracoes
*
* Output : u(n,m) - solucao
*****************************************************************
*/
/* jacobi: solve the Poisson problem with omega-relaxed sweeps until the
 * residual norm drops below tol or maxit iterations are reached.
 * Fix vs original: `error` was never reset inside the while loop, so the
 * reduction kept adding each sweep's residual on top of the previous
 * (sqrt'ed) value and the initial 10*tol seed — the convergence test
 * never measured the current sweep's residual.
 * NOTE(review): the update reads U(i-1,j)/U(i,j-1) that the same sweep
 * may already have modified (a Gauss-Seidel-like race under OpenMP);
 * preserved as-is — confirm against the intended method. */
void jacobi (int n, double dx, double omega, double **u, double **f, double tol, int maxit ) {
    int i,j,k;
    double error, resid, h2;
    double **uold;

    uold = dalloc_matrix(n,n);
    h2 = (dx * dx);              /* X-direction coef */
    error = 10.0 * tol;          /* force at least one iteration */
    k = 1;

    while (k <= maxit && error > tol)
    {
        error = 0.0;   /* fix: reset the accumulator for this sweep */

        /* copia solução corrente para solução antiga */
#pragma omp parallel for private(i, j) collapse(2)
        for (i = 0; i < n; i++)
            for (j = 0; j < n; j++)
                Uold(i,j) = U(i,j);

        /* calcula o residuo por diferencas finitas */
#pragma omp parallel for reduction(+:error) private(i, j, resid) collapse(2)
        for (i = 1; i < n - 1; i++) {
            for (j = 1; j < n - 1; j++) {
                /* atualiza a solução */
                U(i,j) = (1.0 - omega) * U(i,j) + omega * (U(i-1,j) + U(i+1,j) + U(i,j-1) + U(i,j+1) + h2 * F(i,j)) / 4.0;
                /* acumula o erro relativo */
                resid = U(i,j) - Uold(i,j);
                error += resid * resid;
            }
        }

        /* verifica o erro */
        k++;
        error = sqrt(error);
    } /* while */

    printf("Numero total de iteracoes: %d\n", k);
    printf("Error : %10.15f\n", error);
    dfree_matrix(uold, n);
}
|
3d25pt.c | /*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* timeval_subtract: compute *result = *x - *y, normalizing so that
 * result->tv_usec is non-negative.  Mutates *y while borrowing/carrying.
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y so x->tv_usec >= y->tv_usec. */
    if (x->tv_usec < y->tv_usec)
    {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Carry excess microseconds (> 1s) into y's seconds. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }

    /* tv_usec is now certainly positive. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    return x->tv_sec < y->tv_sec;
}
/* main: run the order-4, 3D 25-point wave-equation stencil TESTS times
 * and report the best wall-clock time.
 * Fixes vs original:
 *  - Nx/Ny/Nz/Nt were read uninitialized when command-line arguments
 *    were omitted; sane defaults are now set first.
 *  - roc2 was malloc'ed twice (the first single-element block leaked).
 *  - A[1] and the index-0 planes were read by the first sweep before
 *    ever being written; the initialization now covers index 0 and
 *    seeds A[1].
 *  - A and tile_size are now freed. */
int main(int argc, char *argv[])
{
    int t, i, j, k, test;
    int Nx, Ny, Nz, Nt;

    /* Grid: requested interior size plus 2*4 halo points per dimension.
       Defaults keep the run well-defined when arguments are omitted. */
    Nx = Ny = Nz = 32 + 8;
    Nt = 10;
    if (argc > 3) {
        Nx = atoi(argv[1])+8;
        Ny = atoi(argv[2])+8;
        Nz = atoi(argv[3])+8;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);

    /* two time planes of A plus the coefficient field roc2 */
    double ****A = (double ****) malloc(sizeof(double***)*2);
    double ***roc2;   /* fix: was malloc'ed twice, leaking the first block */
    A[0] = (double ***) malloc(sizeof(double**)*Nz);
    A[1] = (double ***) malloc(sizeof(double**)*Nz);
    roc2 = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
        A[0][i] = (double**) malloc(sizeof(double*)*Ny);
        A[1][i] = (double**) malloc(sizeof(double*)*Ny);
        roc2[i] = (double**) malloc(sizeof(double*)*Ny);
        for(j=0;j<Ny;j++){
            A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
            A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
            roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
        }
    }

    // tile size information, including extra element to decide the list length
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    // The list is modified here before source-to-source transformations
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 8;
    tile_size[1] = 8;
    tile_size[2] = 16;
    tile_size[3] = 2048;
    tile_size[4] = -1;

    // for timekeeping
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff=1.e100;

    const int BASE = 1024;

    /* initialize both time planes and the coefficients; loops start at 0
       (the original started at 1 and left plane 0 — which the stencil
       reads at i-4 — plus all of A[1] uninitialized) */
    srand(42);
    for (i = 0; i < Nz; i++) {
        for (j = 0; j < Ny; j++) {
            for (k = 0; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
                A[1][i][j][k] = 0.0;
                roc2[i][j][k] = 2.0 * (rand() % BASE);
            }
        }
    }

#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif

    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif

    /* 25-point stencil coefficients (order-4 in each direction) */
    const double coef0 = -0.28472;
    const double coef1 = 0.16000;
    const double coef2 = -0.02000;
    const double coef3 = 0.00254;
    const double coef4 = -0.00018;

    for(test=0; test<TESTS; test++){
        gettimeofday(&start, 0);
        // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
        for (t = 0; t < Nt; t++) {
            for (i = 4; i < Nz-4; i++) {
                for (j = 4; j < Ny-4; j++) {
                    for (k = 4; k < Nx-4; k++) {
                        A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*(
                            coef0* A[t%2][i  ][j  ][k  ] +
                            coef1*(A[t%2][i-1][j  ][k  ] + A[t%2][i+1][j  ][k  ] +
                                   A[t%2][i  ][j-1][k  ] + A[t%2][i  ][j+1][k  ] +
                                   A[t%2][i  ][j  ][k-1] + A[t%2][i  ][j  ][k+1]) +
                            coef2*(A[t%2][i-2][j  ][k  ] + A[t%2][i+2][j  ][k  ] +
                                   A[t%2][i  ][j-2][k  ] + A[t%2][i  ][j+2][k  ] +
                                   A[t%2][i  ][j  ][k-2] + A[t%2][i  ][j  ][k+2]) +
                            coef3*(A[t%2][i-3][j  ][k  ] + A[t%2][i+3][j  ][k  ] +
                                   A[t%2][i  ][j-3][k  ] + A[t%2][i  ][j+3][k  ] +
                                   A[t%2][i  ][j  ][k-3] + A[t%2][i  ][j  ][k+3]) +
                            coef4*(A[t%2][i-4][j  ][k  ] + A[t%2][i+4][j  ][k  ] +
                                   A[t%2][i  ][j-4][k  ] + A[t%2][i  ][j+4][k  ] +
                                   A[t%2][i  ][j  ][k-4] + A[t%2][i  ][j  ][k+4]) );
                    }
                }
            }
        }
#pragma endscop
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        min_tdiff = MIN(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }

    PRINT_RESULTS(4, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif

    // Free allocated arrays
    for(i=0; i<Nz; i++){
        for(j=0;j<Ny;j++){
            free(A[0][i][j]);
            free(A[1][i][j]);
            free(roc2[i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
        free(roc2[i]);
    }
    free(A[0]);
    free(A[1]);
    free(roc2);
    free(A);           /* fix: A itself was never freed */
    free(tile_size);   /* fix: tile_size was never freed */
    return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.