source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
trilinos_residual_criteria.h | // KRATOS _____ _ _ _
// |_ _| __(_) (_)_ __ ___ ___
// | || '__| | | | '_ \ / _ \/ __|
// | || | | | | | | | | (_) \__
// |_||_| |_|_|_|_| |_|\___/|___/ APPLICATION
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Jordi Cotela
//
#if !defined(KRATOS_TRILINOS_RESIDUAL_CRITERIA_H_INCLUDED)
#define KRATOS_TRILINOS_RESIDUAL_CRITERIA_H_INCLUDED
// System includes
// External includes
// Project includes
#include "includes/define.h"
#include "solving_strategies/convergencecriterias/residual_criteria.h"
namespace Kratos
{
///@addtogroup TrilinosApplication
///@{
///@name Kratos Classes
///@{
/// MPI version of the ResidualCriteria.
/** Implements a convergence criteria based on the norm of the (free rows of) the RHS vector.
* @see ResidualCriteria
*/
template< class TSparseSpace, class TDenseSpace >
class TrilinosResidualCriteria : public ResidualCriteria< TSparseSpace, TDenseSpace >
{
public:
///@name Type Definitions
///@{
/// Pointer definition of TrilinosResidualCriteria
KRATOS_CLASS_POINTER_DEFINITION(TrilinosResidualCriteria);
/// Base convergence criteria type (serial residual criteria).
typedef ResidualCriteria< TSparseSpace, TDenseSpace > BaseType;
/// Scalar type used for norms and tolerances.
typedef typename BaseType::TDataType TDataType;
///@}
///@name Life Cycle
///@{
/// Constructor
/// @param NewRatioTolerance Relative (ratio) tolerance for convergence.
/// @param AlwaysConvergedNorm Absolute residual norm below which convergence is always accepted.
explicit TrilinosResidualCriteria(TDataType NewRatioTolerance,TDataType AlwaysConvergedNorm):
ResidualCriteria<TSparseSpace,TDenseSpace>(NewRatioTolerance, AlwaysConvergedNorm)
{}
/// Copy constructor
explicit TrilinosResidualCriteria(const TrilinosResidualCriteria& rOther):
ResidualCriteria<TSparseSpace,TDenseSpace>(rOther)
{}
/// Destructor.
~TrilinosResidualCriteria() override {}
///@}
///@name Operators
///@{
/// Deleted assignment operator.
TrilinosResidualCriteria& operator=(TrilinosResidualCriteria const& rOther) = delete;
///@}
protected:
///@name Protected Operations
///@{
/**
 * @brief This method computes the norm of the residual
 * @details It checks if the dof is fixed
 * @param rModelPart Reference to the ModelPart containing the problem.
 * @param rResidualSolutionNorm The norm of the residual
 * @param rDofNum The number of DoFs
 * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
 * @param b RHS vector (residual + reactions)
 */
void CalculateResidualNorm(
    ModelPart& rModelPart,
    TDataType& rResidualSolutionNorm,
    typename BaseType::SizeType& rDofNum,
    typename BaseType::DofsArrayType& rDofSet,
    const typename BaseType::TSystemVectorType& rB) override
{
    // Initialize
    // Local (per-rank) accumulators; combined with MPI reductions below.
    TDataType residual_solution_norm = TDataType();
    long int local_dof_num = 0;
    const double rank = rB.Comm().MyPID(); // To compare with PARTITION_INDEX, which is a double variable
    // Loop over Dofs
    // NOTE: each DoF contributes only on its owning rank (PARTITION_INDEX ==
    // rank), so the global reductions below count every DoF exactly once.
    // The exact '==' comparison of doubles is safe here because rank indices
    // are small integers, exactly representable in floating point.
    #pragma omp parallel for reduction(+:residual_solution_norm,local_dof_num)
    for (int i = 0; i < static_cast<int>(rDofSet.size()); i++) {
        auto it_dof = rDofSet.begin() + i;
        typename BaseType::IndexType dof_id;
        TDataType residual_dof_value;
        if (it_dof->IsFree() && (it_dof->GetSolutionStepValue(PARTITION_INDEX) == rank)) {
            dof_id = it_dof->EquationId();
            residual_dof_value = TSparseSpace::GetValue(rB,dof_id);
            residual_solution_norm += residual_dof_value * residual_dof_value;
            local_dof_num++;
        }
    }
    // Combine local contributions
    // Note that I'm not merging the two calls because one adds doubles and the other ints (JC)
    rB.Comm().SumAll(&residual_solution_norm,&rResidualSolutionNorm,1);
    // SizeType is long unsigned int in linux, but EpetraComm does not support unsigned types
    long int global_dof_num = 0;
    rB.Comm().SumAll(&local_dof_num,&global_dof_num,1);
    rDofNum = static_cast<typename BaseType::SizeType>(global_dof_num);
    // Norm is the square root of the globally-summed squares.
    rResidualSolutionNorm = std::sqrt(rResidualSolutionNorm);
}
///@}
private:
///@name Member Variables
///@{
///@}
///@name Private Operations
///@{
///@}
}; // Class TrilinosResidualCriteria
///@}
///@} addtogroup block
} // namespace Kratos.
#endif // KRATOS_TRILINOS_RESIDUAL_CRITERIA_H_INCLUDED defined
|
sampling.c | // Copyright (c) 2018-2019 Osamu Hirose
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<assert.h>
#include"kdtree.h"
#include"misc.h"
#define SQ(x) ((x)*(x))
double genrand64_real1(void);
/* Systematic (low-variance) resampling: given normalized probabilities
 * probs[0..N-1], computes how many of the M output samples each input
 * point receives (written to nums[0..N-1], sum over nums equals M). */
static void resampling(int *nums, const double *probs, int N, int M){
  int idx;
  /* random offset of the comb of M equally spaced pointers */
  double offset = genrand64_real1() / (double) M;
  for (idx = 0; idx < N; idx++) {
    int count = (int) floor((probs[idx] - offset) * M) + 1;
    nums[idx] = count;
    /* carry the rounding remainder into the next slot */
    offset += count / (double) M - probs[idx];
  }
}
/* Uniform random downsampling: copies L of the N input points (D dims each)
 * from X into x, chosen via a random permutation. Requires L <= N. */
static void downsample_a(double *x, int L, double *X, int D, int N){
  int *perm; int dim, pos;
  if (L > N) {
    printf("\n\n ERROR: L<=N must be satisfied in the function 'downsample_a'. Abort.\n\n");
    exit(EXIT_FAILURE);
  }
  perm = (int *) calloc(N, sizeof(int));
  randperm(perm, N);
  /* take the first L entries of the permutation */
  for (pos = 0; pos < L; pos++)
    for (dim = 0; dim < D; dim++)
      x[dim + D * pos] = X[dim + D * perm[pos]];
  free(perm);
}
/* Density-weighted downsampling: weights each point inversely by the number
 * of neighbors within radius e (found via a kd-tree ball search), so sparse
 * regions are preserved, then resamples L points into x.
 * x: output (D*L), X: input (D*N), e: neighborhood radius. */
static void downsample_b(double *x, int L, double *X, int D, int N, double e){
int *S,*T,*a,*c; double *w,*v; int sd=sizeof(double),si=sizeof(int);
int d,j,n,q,l=0,mtd=MAXTREEDEPTH; double val=0;
/* allocation */
/* Buffer sizes (3*N+1, N*mtd, 6*N, 2*N) follow the kdtree/eballsearch_next
 * API from kdtree.h -- presumably tree nodes, per-point search stacks and
 * work arrays; confirm against kdtree.h if changing them. */
T= (int *)calloc(3*N+1,si); S= (int *)calloc(N*mtd,si);
a= (int *)calloc(6*N, si); w= (double *)calloc(N,sd);
v= (double *)calloc(2*N, sd); c= (int *)calloc(N,si);
/* build kdtree */
kdtree(T,a,v,X,D,N);
/* count #neighbors */
/* Each thread uses its own search stack S+mtd*n, so iterations are
 * independent; j/q are per-thread search cursors. */
#pragma omp parallel for private (j) private (q)
for(n=0;n<N;n++){j=q=0;w[n]=0.0f;
do{eballsearch_next(&j,S+mtd*n,&q,X+D*n,e,X,T,D,N);if(j>=0){w[n]+=1.0f;}} while(q);
assert(w[n]>=1.0f); /* a point is always its own neighbor */
}
/* sampling probabilities */
/* w[n] <- (1/w[n]) / sum(1/w): normalized inverse-density weights */
for(n=0;n<N;n++) val+=1.0f/(w[n]);
for(n=0;n<N;n++) w[n]=1.0f/(w[n]*val);
/* resampling */
resampling(c,w,N,L);
/* output */
/* emit point n c[n] times; l is the running output index (ends at L) */
for(n=0;n<N;n++)for(j=0;j<c[n];j++){for(d=0;d<D;d++){x[d+D*l]=X[d+D*n];} l++;}
free(T);free(a);free(v);
free(S);free(w);free(c);
}
/* voxel grid filter */
static void downsample_c(double *x, int L, double *X, int D, int N, double e){
int d,j,l=0,n,num; size_t K; int *v,*c,*np,*cum,*div; double *w,*max,*min; int sd=sizeof(double),si=sizeof(int);
double val=0;
/* allocation */
v= (int *)calloc(N,si); max= (double *)calloc(D,sd); div= (int *)calloc(D,si); w= (double *)calloc(N,sd);
c= (int *)calloc(N,si); min= (double *)calloc(D,sd); cum= (int *)calloc(D,si);
/* bounding box */
for(d=0;d<D;d++){min[d]=X[d];for(n=0;n<N;n++){min[d]=X[d+D*n]<min[d]?X[d+D*n]:min[d];}}
for(d=0;d<D;d++){max[d]=X[d];for(n=0;n<N;n++){max[d]=X[d+D*n]>max[d]?X[d+D*n]:max[d];}}
/* divide in grid & count points in a voxel */
for(d=0;d<D;d++) div[d]=ceil((max[d]-min[d])/e);
cum[0]=1; cum[1]=div[0]; for(d=2;d<D;d++) cum[d]=cum[d-1]*div[d-1];
K=cum[D-1]*div[D-1]; if(K>=1e8){printf(" ERROR: Voxel grid width is too small. Abort.\n\n"); exit(EXIT_FAILURE);}
np= (int *)calloc(K,si);
for(n=0;n<N;n++){v[n]=0;for(d=0;d<D;d++){j=floor((X[d+D*n]-min[d])/e);j-=(j==div[d])?1:0;v[n]+=cum[d]*j;}}
for(n=0;n<N;n++) np[v[n]]++;
/* sampling probabilities */
for(n=0;n<N;n++){num=np[v[n]];assert(num>0);w[n]=1.0f/num;val+=w[n];}
for(n=0;n<N;n++) w[n]/=val;
/* resampling */
resampling(c,w,N,L);
/* output */
for(n=0;n<N;n++)for(j=0;j<c[n];j++){for(d=0;d<D;d++){x[d+D*l]=X[d+D*n];} l++;}
free(v);free(max);free(div);free(w);
free(c);free(min);free(cum);free(np);
}
/* Downsample N points (D dims) in X to L points in x. The sign of e selects
 * the strategy: e == 0 uniform random, e > 0 density-weighted with radius e,
 * e < 0 voxel-grid with edge length -e. */
void downsample(double *x, int L, double *X, int D, int N, double e){
  if (e == 0.0) {
    downsample_a(x, L, X, D, N);
  } else if (e > 0.0) {
    downsample_b(x, L, X, D, N, e);
  } else {
    downsample_c(x, L, X, D, N, -e);
  }
}
|
parallel_priority_queue.h | /***************************************************************************
* include/stxxl/bits/containers/parallel_priority_queue.h
*
* Part of the STXXL. See http://stxxl.sourceforge.net
*
* Copyright (C) 2014-2015 Thomas Keh <thomas.keh@student.kit.edu>
* Copyright (C) 2014-2015 Timo Bingmann <tb@panthema.net>
*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
**************************************************************************/
#ifndef STXXL_CONTAINERS_PARALLEL_PRIORITY_QUEUE_HEADER
#define STXXL_CONTAINERS_PARALLEL_PRIORITY_QUEUE_HEADER
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdlib>
#include <ctime>
#include <list>
#include <utility>
#include <numeric>
#include <vector>
#if STXXL_PARALLEL
#include <omp.h>
#endif
#if __cplusplus >= 201103L
#define STXXL_MOVE(T) std::move(T)
#else
#define STXXL_MOVE(T) T
#endif
#include <stxxl/bits/common/winner_tree.h>
#include <stxxl/bits/common/custom_stats.h>
#include <stxxl/bits/common/mutex.h>
#include <stxxl/bits/common/timer.h>
#include <stxxl/bits/common/is_heap.h>
#include <stxxl/bits/common/swap_vector.h>
#include <stxxl/bits/common/rand.h>
#include <stxxl/bits/config.h>
#include <stxxl/bits/io/request_operations.h>
#include <stxxl/bits/mng/block_alloc.h>
#include <stxxl/bits/mng/buf_ostream.h>
#include <stxxl/bits/mng/prefetch_pool.h>
#include <stxxl/bits/mng/block_manager.h>
#include <stxxl/bits/mng/read_write_pool.h>
#include <stxxl/bits/mng/typed_block.h>
#include <stxxl/bits/namespace.h>
#include <stxxl/bits/noncopyable.h>
#include <stxxl/bits/parallel.h>
#include <stxxl/bits/verbose.h>
#include <stxxl/types>
STXXL_BEGIN_NAMESPACE
namespace ppq_local {
/*!
* A random-access iterator class for block oriented data. The iterator is
* intended to be provided by the internal_array and external_array classes
* and to be used by the multiway_merge algorithm as input iterators.
*
* \tparam ValueType the value type
*/
template <class ValueType>
class ppq_iterator
{
public:
typedef ValueType value_type;
typedef value_type& reference;
typedef value_type* pointer;
typedef ptrdiff_t difference_type;
typedef std::random_access_iterator_tag iterator_category;
/// One (begin, end) pointer pair per data block.
typedef std::vector<std::pair<pointer, pointer> > block_pointers_type;
protected:
typedef ppq_iterator self_type;
//! pointer to a vector of begin/end pointer pairs
//! They allow access to the data blocks.
const block_pointers_type* m_block_pointers;
//! pointer to the current element
pointer m_current;
//! index of the current element (global across all blocks)
size_t m_index;
//! index of the current element's block
size_t m_block_index;
//! size of each data block
size_t m_block_items;
public:
//! default constructor (should not be used directly)
ppq_iterator()
: m_block_pointers(NULL)
{ }
//! constructor
//!
//! \param block_pointers A reference to the properly initialized vector of begin and end pointers.
//! One pair for each block. The pointers should be valid for all blocks that
//! are expected to be accessed with this iterator.
//! \param block_items The size of a single block. If there is only one block (e.g. if the iterator
//! belongs to an internal_array), use the total size here.
//! \param index The index of the current element (global - index 0 belongs to the first element
//! in the first block, no matter if the values are still valid)
ppq_iterator(const block_pointers_type* block_pointers, size_t block_items,
size_t index)
: m_block_pointers(block_pointers),
m_index(index),
m_block_items(block_items)
{
// derives m_block_index and m_current from m_index
update();
}
//! returns the value's index in the internal or external array
size_t get_index() const
{
return m_index;
}
reference operator * () const
{
assert(m_current);
return *m_current;
}
pointer operator -> () const
{
return &(operator * ());
}
//! random access relative to the current position (may cross blocks)
reference operator [] (difference_type relative_index) const
{
const difference_type index = m_index + relative_index;
const size_t block_index = index / m_block_items;
const size_t local_index = index % m_block_items;
assert(block_index < m_block_pointers->size());
assert((*m_block_pointers)[block_index].first + local_index
< (*m_block_pointers)[block_index].second);
return *((*m_block_pointers)[block_index].first + local_index);
}
//! prefix-increment operator
self_type& operator ++ ()
{
++m_index;
++m_current;
// stepped past the end of the current block?
if (UNLIKELY(m_current == (*m_block_pointers)[m_block_index].second)) {
if (m_block_index + 1 < m_block_pointers->size()) {
m_current = (*m_block_pointers)[++m_block_index].first;
}
else {
// global end
// NOTE(review): here m_current is left at the last block's .second
// while update() represents the global end as m_current = NULL;
// comparisons only use m_index, so both representations compare
// equal -- confirm before relying on m_current at the end.
assert(m_block_index + 1 == m_block_pointers->size());
m_current = (*m_block_pointers)[m_block_index++].second;
}
}
return *this;
}
//! prefix-decrement operator
self_type& operator -- ()
{
assert(m_index > 0);
--m_index;
// m_block_index may be one past the last block when at the global end
if (m_block_index >= m_block_pointers->size()
|| m_current == (*m_block_pointers)[m_block_index].first) {
// begin of current block or global end
assert(m_block_index > 0);
assert(m_block_index <= m_block_pointers->size());
m_current = (*m_block_pointers)[--m_block_index].second - 1;
}
else {
--m_current;
}
return *this;
}
self_type operator + (difference_type addend) const
{
return self_type(m_block_pointers, m_block_items, m_index + addend);
}
self_type& operator += (difference_type addend)
{
m_index += addend;
update();
return *this;
}
self_type operator - (difference_type subtrahend) const
{
return self_type(m_block_pointers, m_block_items, m_index - subtrahend);
}
difference_type operator - (const self_type& o) const
{
return (m_index - o.m_index);
}
self_type& operator -= (difference_type subtrahend)
{
m_index -= subtrahend;
update();
return *this;
}
// All comparisons are based solely on the global index m_index.
bool operator == (const self_type& o) const
{
return m_index == o.m_index;
}
bool operator != (const self_type& o) const
{
return m_index != o.m_index;
}
bool operator < (const self_type& o) const
{
return m_index < o.m_index;
}
bool operator <= (const self_type& o) const
{
return m_index <= o.m_index;
}
bool operator > (const self_type& o) const
{
return m_index > o.m_index;
}
bool operator >= (const self_type& o) const
{
return m_index >= o.m_index;
}
friend std::ostream& operator << (std::ostream& os, const ppq_iterator& i)
{
return os << "[" << i.m_index << "]";
}
private:
//! updates m_block_index and m_current based on m_index
inline void update()
{
m_block_index = m_index / m_block_items;
const size_t local_index = m_index % m_block_items;
if (m_block_index < m_block_pointers->size()) {
m_current = (*m_block_pointers)[m_block_index].first + local_index;
assert(m_current <= (*m_block_pointers)[m_block_index].second);
}
else {
// global end if end is beyond the last real block
assert(m_block_index == m_block_pointers->size());
assert(local_index == 0);
//-tb old: m_current = (*m_block_pointers)[m_block_index - 1].second;
m_current = NULL;
}
}
};
/*!
* Internal arrays store a sorted sequence of values in RAM, which will be
* merged together into the deletion buffer when it needs to be
* refilled. Internal arrays are constructed from the insertions heaps when
* they overflow.
*/
template <class ValueType>
class internal_array : private noncopyable
{
public:
typedef ValueType value_type;
typedef ppq_iterator<value_type> iterator;
protected:
typedef typename iterator::block_pointers_type block_pointers_type;
//! Contains the items of the sorted sequence.
std::vector<value_type> m_values;
//! Index of the current head
unsigned_type m_min_index;
//! Level of internal array (Sander's PQ: group number)
unsigned_type m_level;
//! Begin and end pointers of the array
//! This is used by the iterator
block_pointers_type m_block_pointers;
public:
//! Default constructor. Don't use this directy. Needed for regrowing in
//! surrounding vector.
internal_array() : m_min_index(0) { }
//! Constructor which takes a value vector. The value vector is empty
//! afterwards.
internal_array(std::vector<value_type>& values,
unsigned_type min_index = 0,
unsigned_type level = 0)
: m_values(), m_min_index(min_index), m_level(level),
m_block_pointers(1)
{
std::swap(m_values, values);
// BUGFIX: assert on m_values, not values -- after the swap above,
// 'values' holds the (empty) default-constructed vector, so the old
// check 'values.size() > 0' failed unconditionally in assert builds.
STXXL_ASSERT(m_values.size() > 0);
// single "block" spanning the whole vector, used by the iterator
m_block_pointers[0] = std::make_pair(&(*m_values.begin()), &(*m_values.begin()) + m_values.size());
}
//! Swap internal_array with another one.
void swap(internal_array& o)
{
using std::swap;
swap(m_values, o.m_values);
swap(m_min_index, o.m_min_index);
swap(m_level, o.m_level);
swap(m_block_pointers, o.m_block_pointers);
}
//! Swap internal_array with another one.
friend void swap(internal_array& a, internal_array& b)
{
a.swap(b);
}
//! Random access operator
inline value_type& operator [] (size_t i)
{
return m_values[i];
}
//! Use inc_min(diff) if multiple values have been extracted.
inline void inc_min(size_t diff = 1)
{
m_min_index += diff;
}
//! The currently smallest element in the array.
inline const value_type & get_min() const
{
return m_values[m_min_index];
}
//! The index of the currently smallest element in the array.
inline size_t get_min_index() const
{
return m_min_index;
}
//! The index of the largest element in the array.
inline size_t get_max_index() const
{
return (m_values.size() - 1);
}
//! Returns if the array has run empty.
inline bool empty() const
{
return (m_min_index >= m_values.size());
}
//! Make this array empty.
inline void make_empty()
{
m_min_index = m_values.size();
}
//! Returns the current size of the array (remaining, not yet extracted items).
inline size_t size() const
{
return (m_values.size() - m_min_index);
}
//! Returns the initial size of the array.
inline size_t capacity() const
{
return m_values.size();
}
//! Returns the level (group number) of the array.
inline unsigned_type level() const
{
return m_level;
}
//! Return the amount of internal memory used by an array with the capacity
//! in number of items.
static size_t int_memory(size_t capacity)
{
return sizeof(internal_array) + capacity * sizeof(value_type);
}
//! Return the amount of internal memory used by the array
inline size_t int_memory() const
{
return int_memory(m_values.capacity());
}
//! Begin iterator (starts at the current head, not at index 0)
inline iterator begin() const
{
// not const, unfortunately.
return iterator(&m_block_pointers, capacity(), m_min_index);
}
//! End iterator
inline iterator end() const
{
// not const, unfortunately.
return iterator(&m_block_pointers, capacity(), capacity());
}
};
template <class ExternalArrayType>
class external_array_writer;
/*!
* External array stores a sorted sequence of values on the hard disk and
* allows access to the first block (containing the smallest values). The
* class uses buffering and prefetching in order to improve the performance.
*
* \tparam ValueType Type of the contained objects (POD with no references to
* internal memory).
*
* \tparam BlockSize External block size. Default =
* STXXL_DEFAULT_BLOCK_SIZE(ValueType).
*
* \tparam AllocStrategy Allocation strategy for the external memory. Default =
* STXXL_DEFAULT_ALLOC_STRATEGY.
*/
template <
class ValueType,
unsigned_type BlockSize = STXXL_DEFAULT_BLOCK_SIZE(ValueType),
class AllocStrategy = STXXL_DEFAULT_ALLOC_STRATEGY
>
class external_array : private noncopyable
{
public:
typedef ValueType value_type;
typedef ppq_iterator<value_type> iterator;
typedef external_array<value_type, BlockSize, AllocStrategy> self_type;
typedef typed_block<BlockSize, value_type> block_type;
typedef read_write_pool<block_type> pool_type;
typedef std::vector<BID<BlockSize> > bid_vector;
typedef typename bid_vector::iterator bid_iterator;
typedef std::vector<block_type*> block_vector;
typedef std::vector<request_ptr> request_vector;
typedef std::vector<value_type> minima_vector;
typedef typename iterator::block_pointers_type block_pointers_type;
typedef external_array_writer<self_type> writer_type;
//! The number of elements fitting into one block
enum {
block_size = BlockSize,
block_items = BlockSize / sizeof(value_type)
};
static const bool debug = false;
protected:
//! The total size of the external array in items. Cannot be changed
//! after construction.
external_size_type m_capacity;
//! Number of blocks, again: calculated at construction time.
unsigned_type m_num_blocks;
//! Level of external array (Sander's PQ: group number)
unsigned_type m_level;
//! Common prefetch and write buffer pool
pool_type* m_pool;
//! The IDs of each block in external memory.
bid_vector m_bids;
//! A vector of size m_num_blocks with block_type pointers, some of them
//! will be filled while writing, but most are NULL.
//! During the write phase the sentinel value (block_type*)1 marks a block
//! as "uninitialized, never written to disk" (see read_block()).
block_vector m_blocks;
//! Begin and end pointers for each block, used for merging with
//! ppq_iterator.
block_pointers_type m_block_pointers;
//! The read request pointers are used to wait until the block has been
//! completely fetched.
request_vector m_requests;
//! stores the minimum value of each block
minima_vector m_minima;
//! Is array in write phase? True = write phase, false = read phase.
bool m_write_phase;
//! The total number of elements minus the number of extracted values
external_size_type m_size;
//! The read position in the array.
external_size_type m_index;
//! The index behind the last element that is located in RAM (or is at
//! least requested to be so)
external_size_type m_end_index;
//! The first unhinted block index.
unsigned_type m_unhinted_block;
//! The first unhinted block index as it was before the
//! prepare_rebuilding_hints() call. Used for removal of hints which aren't
//! needed anymore.
unsigned_type m_old_unhinted_block;
//! allow writer to access to all variables
friend class external_array_writer<self_type>;
public:
/*!
 * Constructs an external array
 *
 * \param size The total number of elements. Cannot be changed after
 * construction.
 *
 * \param pool Common prefetch and write buffer pool, shared with other
 * arrays; not owned by this object.
 *
 * \param level Level of the external array (Sander's PQ: group number)
 */
external_array(external_size_type size, pool_type* pool, unsigned_type level = 0)
: // constants
m_capacity(size),
m_num_blocks((size_t)div_ceil(m_capacity, block_items)),
m_level(level),
m_pool(pool),
// vectors
m_bids(m_num_blocks),
// sentinel (block_type*)1 = "uninitialized block", see read_block()
m_blocks(m_num_blocks, reinterpret_cast<block_type*>(1)),
m_block_pointers(m_num_blocks),
m_requests(m_num_blocks, NULL),
m_minima(m_num_blocks),
// state
m_write_phase(true),
// indices
m_size(0),
m_index(0),
m_end_index(0),
m_unhinted_block(0),
m_old_unhinted_block(0)
{
assert(m_capacity > 0);
// allocate blocks in EM.
block_manager* bm = block_manager::get_instance();
bm->new_blocks(AllocStrategy(), m_bids.begin(), m_bids.end());
}
//! Default constructor. Don't use this directly. Needed for regrowing in
//! surrounding vector.
external_array()
: // constants
m_capacity(0),
m_num_blocks(0),
m_level(0),
m_pool(NULL),
// vectors
m_bids(0),
m_blocks(0),
m_block_pointers(0),
m_requests(0),
m_minima(0),
// state
m_write_phase(false),
// indices
m_size(0),
m_index(0),
m_end_index(0),
m_unhinted_block(0),
m_old_unhinted_block(0)
{ }
//! Swap external_array with another one.
void swap(external_array& o)
{
using std::swap;
// constants
swap(m_capacity, o.m_capacity);
swap(m_num_blocks, o.m_num_blocks);
swap(m_level, o.m_level);
swap(m_pool, o.m_pool);
// vectors
swap(m_bids, o.m_bids);
swap(m_requests, o.m_requests);
swap(m_blocks, o.m_blocks);
swap(m_block_pointers, o.m_block_pointers);
swap(m_minima, o.m_minima);
// state
swap(m_write_phase, o.m_write_phase);
// indices
swap(m_size, o.m_size);
swap(m_index, o.m_index);
swap(m_end_index, o.m_end_index);
swap(m_unhinted_block, o.m_unhinted_block);
swap(m_old_unhinted_block, o.m_old_unhinted_block);
}
//! Swap external_array with another one (ADL-findable free function).
friend void swap(external_array& a, external_array& b)
{
a.swap(b);
}
//! Destructor. Releases RAM blocks back to the pool, cancels outstanding
//! prefetch requests, and frees the remaining EM blocks.
~external_array()
{
// fully consumed array: everything was already released on the way
if (m_size == 0) return;
// not all data has been read! this only happen when the PPQ is
// destroyed while containing data.
const unsigned_type block_index = m_index / block_items;
const unsigned_type end_block_index = get_end_block_index();
// released blocks currently held in RAM
// NOTE(review): the final assertion below expects these entries to be
// NULL -- assumes add_prefetch() takes the pointer by reference and
// resets it; confirm against read_write_pool.
for (size_t i = block_index; i < end_block_index; ++i) {
m_pool->add_prefetch(m_blocks[i]);
// cannot report the number of freed blocks to PPQ.
}
// cancel currently hinted blocks
for (size_t i = end_block_index; i < m_unhinted_block; ++i) {
STXXL_DEBUG("ea[" << this << "]: discarding prefetch hint on"
" block " << i);
m_requests[i]->cancel();
m_requests[i]->wait();
// put block back into pool
m_pool->add_prefetch(m_blocks[i]);
// invalidate block entry
m_blocks[i] = NULL;
m_requests[i] = request_ptr();
}
// figure out first block that is still allocated in EM.
bid_iterator i_begin = m_bids.begin() + block_index;
block_manager::get_instance()->delete_blocks(i_begin, m_bids.end());
// check that all is empty
for (size_t i = block_index; i < end_block_index; ++i)
assert(m_blocks[i] == NULL);
}
//! Returns the capacity in items.
size_t capacity() const
{
return m_capacity;
}
//! Returns the current size in items (total minus extracted).
size_t size() const
{
return m_size;
}
//! Returns true if the array is empty.
bool empty() const
{
return (m_size == 0);
}
//! Returns the level (group number) of the array.
inline unsigned_type level() const
{
return m_level;
}
//! Return the number of blocks.
size_t num_blocks() const
{
return m_num_blocks;
}
//! Returns memory usage of EA with given capacity, excluding blocks loaded
//! in RAM. Blocks belong to prefetch pool.
static size_t int_memory(size_t capacity)
{
size_t num_blocks = div_ceil(capacity, block_items);
return sizeof(external_array)
+ num_blocks * sizeof(typename bid_vector::value_type)
+ num_blocks * sizeof(typename block_vector::value_type)
+ num_blocks * sizeof(typename block_pointers_type::value_type)
+ num_blocks * sizeof(typename request_vector::value_type)
+ num_blocks * sizeof(typename minima_vector::value_type);
}
//! Return the amount of internal memory used by the EA.
inline size_t int_memory() const
{
return int_memory(m_capacity);
}
//! Returns the number elements available in internal memory
size_t buffer_size() const
{
return (m_end_index - m_index);
}
//! Returns the block beyond the block in which *(m_end_index-1) is located.
unsigned_type get_end_block_index() const
{
unsigned_type end_block_index = m_end_index / block_items;
// increase block index if inside the block
if (m_end_index % block_items != 0) ++end_block_index;
assert(end_block_index <= m_num_blocks);
return end_block_index;
}
//! Returns the block in which m_index is located.
inline unsigned_type get_current_block_index() const
{
return (m_index / block_items);
}
//! Returns a random-access iterator to the begin of the data
//! in internal memory.
iterator begin() const
{
//-TODO?: assert(block_valid(m_index / block_items) || m_index == m_capacity);
return iterator(&m_block_pointers, block_items, m_index);
}
//! Returns a random-access iterator 1 behind the end of the data
//! in internal memory.
iterator end() const
{
//-TODO? assert(!block_valid(m_end_index / block_items) || m_end_index == m_capacity);
return iterator(&m_block_pointers, block_items, m_end_index);
}
//! Returns the smallest element in the array
//! (first not-yet-extracted element; the array is sorted).
const value_type & get_min()
{
return *begin();
}
//! Returns if there is data in EM, that's not randomly accessible.
bool has_em_data() const
{
return (get_end_block_index() < m_num_blocks);
}
//! Returns the smallest element of the first block NOT in internal memory
//! (or at least requested to be in internal memory)
const value_type & get_next_block_min() const
{
assert(get_end_block_index() < m_num_blocks);
return m_minima[get_end_block_index()];
}
//! Returns if the data requested to be in internal memory is
//! completely fetched. True if wait() has been called before.
bool valid() const
{
bool result = true;
const unsigned_type block_index = m_index / block_items;
const unsigned_type end_block_index = get_end_block_index();
// every block between the read position and the end of the RAM window
// must have finished its fetch
for (unsigned_type i = block_index; i < end_block_index; ++i) {
result = result && block_valid(i);
}
return result;
}
//! Random access operator for data in internal memory
//! You should call wait() once after fetching data from EM.
value_type& operator [] (size_t i) const
{
assert(i < m_capacity);
const size_t block_index = i / block_items;
const size_t local_index = i % block_items;
assert(i < m_capacity);
assert(block_valid(block_index));
return m_blocks[block_index]->elem[local_index];
}
public:
//! prepare the pool for writing external arrays with given number of
//! threads
static void prepare_write_pool(pool_type& pool, unsigned_type num_threads)
{
unsigned_type write_blocks = num_threads;
// need at least one
if (write_blocks == 0) write_blocks = 1;
// for holding boundary blocks
write_blocks *= 2;
// more disks than threads?
if (write_blocks < config::get_instance()->disks_number())
write_blocks = config::get_instance()->disks_number();
#if STXXL_DEBUG_ASSERTIONS
// required for re-reading the external array
write_blocks = 2 * write_blocks;
#endif
// only ever grow the pool, never shrink it
if (pool.size_write() < write_blocks) {
STXXL_ERRMSG("WARNING: enlarging PPQ write pool to " <<
write_blocks << " blocks = " <<
write_blocks * block_size / 1024 / 1024 << " MiB");
pool.resize_write(write_blocks);
}
}
protected:
//! prepare the external_array for writing using multiway_merge() with
//! num_threads. this method is called by the external_array_writer's
//! constructor.
void prepare_write(unsigned_type num_threads)
{
prepare_write_pool(*m_pool, num_threads);
}
//! finish the writing phase after multiway_merge() filled the vector. this
//! method is called by the external_array_writer's destructor..
void finish_write()
{
// check that all blocks where written
for (unsigned_type i = 0; i < m_num_blocks; ++i)
assert(m_blocks[i] == NULL);
// compatibility to the block write interface
// array becomes full and switches to the read phase
m_size = m_capacity;
m_index = 0;
m_end_index = 0;
m_unhinted_block = 0;
m_write_phase = false;
}
//! Called by the external_array_writer to read a block from disk into
//! m_blocks[]. If the block is marked as uninitialized, then no read is
//! performed. This is the usual case, and in theory, no block ever has be
//! re-read from disk, since all can be written fully. However, we do
//! support re-reading blocks for debugging purposes inside
//! multiway_merge(), in a full performance build re-reading never occurs.
void read_block(size_t block_index)
{
assert(block_index < m_num_blocks);
// entry is either NULL (already written out) or the sentinel (never used)
assert(m_blocks[block_index] == NULL ||
m_blocks[block_index] == reinterpret_cast<block_type*>(1));
if (m_blocks[block_index] == reinterpret_cast<block_type*>(1))
{
// special marker: this block is uninitialized -> no need to read
// from disk.
m_blocks[block_index] = m_pool->steal();
}
else
{
// block was already written, have to read from EM.
STXXL_DEBUG("ea[" << this << "]: "
"read_block needs to re-read block index=" << block_index);
// warn only once per process about the performance hazard
static bool s_warned = false;
if (!s_warned)
{
s_warned = true;
STXXL_ERRMSG("ppq::external_array[" << this << "] "
"writer requested to re-read block from EM.");
STXXL_ERRMSG("This should never occur in full-performance mode, "
"verify that you run in debug mode.");
}
// this re-reading is not necessary for full performance builds, so
// we immediately wait for the I/O to be completed.
m_blocks[block_index] = m_pool->steal();
request_ptr req = m_pool->read(m_blocks[block_index], m_bids[block_index]);
req->wait();
assert(req->poll());
assert(m_blocks[block_index]);
}
}
//! Called by the external_array_writer to write a block from m_blocks[] to
//! disk. Prior to writing and releasing the memory, extra information is
//! preserved.
void write_block(size_t block_index)
{
assert(block_index < m_num_blocks);
// block must be loaded and not the uninitialized-sentinel
assert(m_blocks[block_index] != NULL &&
m_blocks[block_index] != reinterpret_cast<block_type*>(1));
// number of valid items in this block (the last block may be partial);
// currently only used for the debug output and the assertion below
const internal_size_type this_block_items =
std::min<internal_size_type>(block_items, m_capacity - block_index * (external_size_type)block_items);
STXXL_DEBUG("ea[" << this << "]: write_block index=" << block_index <<
" this_block_items=" << this_block_items);
assert(this_block_items > 0);
// preserve the block's minimum (its first element, data is sorted)
// so it remains available for hinting after the block leaves RAM
block_type& this_block = *m_blocks[block_index];
m_minima[block_index] = this_block[0];
// write out block (in background)
m_pool->write(m_blocks[block_index], m_bids[block_index]);
m_blocks[block_index] = NULL;
}
public:
//! \name Prefetching Hints
//! \{
//! Prefetch the next unhinted block, requires one free read block from the
//! global pool.
//! Precondition: has_unhinted_em_data() must be true.
void hint_next_block()
{
assert(m_unhinted_block < m_num_blocks);
// will read (prefetch) block i
size_t i = m_unhinted_block++;
STXXL_DEBUG("ea[" << this << "]: prefetching block_index=" << i);
assert(m_pool->size_write() > 0);
assert(m_blocks[i] == NULL);
// steal block from pool, but also perform read via pool, since this
// checks the associated write_pool.
m_blocks[i] = m_pool->steal_prefetch();
m_requests[i] = m_pool->read(m_blocks[i], m_bids[i]);
}
//! Whether the array still holds blocks in external memory that have not
//! yet been handed to the prefetcher as hints.
bool has_unhinted_em_data() const
{
    return m_unhinted_block < m_num_blocks;
}
//! Returns the smallest element of the next hint candidate (the block
//! after the last hinted one).
//! Precondition: has_unhinted_em_data() must be true.
const value_type & get_next_hintable_min() const
{
assert(m_unhinted_block < m_num_blocks);
// m_minima[] was recorded in write_block() before the block left RAM
return m_minima[m_unhinted_block];
}
//! Number of blocks currently hinted to the prefetcher, i.e. hinted
//! beyond the end of the already loaded range.
size_t num_hinted_blocks() const
{
    const size_t loaded_end = get_end_block_index();
    assert(loaded_end <= m_unhinted_block);
    return m_unhinted_block - loaded_end;
}
//! This method prepares rebuilding the hints (this is done after creating
//! a new EA in order to always have globally the n blocks hinted which
//! will be fetched first). Resets m_unhinted_block to the first block not
//! in RAM. Thereafter prehint_next_block() is used to advance this index.
//! finish_rebuilding_hints() should be called after placing all hints in
//! order to clean up the prefetch pool.
void rebuild_hints_prepare()
{
// remember the previous hint frontier so rebuild_hints_cancel() /
// rebuild_hints_finish() can compute the delta to the new hint set
m_old_unhinted_block = m_unhinted_block;
m_unhinted_block = get_end_block_index();
assert(get_end_block_index() <= m_old_unhinted_block);
}
//! Advance the unhinted-block index without issuing a real prefetch; the
//! actual read is performed later by rebuild_hints_finish().
void rebuild_hints_prehint_next_block()
{
    assert(m_unhinted_block < m_num_blocks);
    // will read (prefetch) block after cancellations.
    const size_t idx = m_unhinted_block++;
    STXXL_DEBUG("ea[" << this << "]: pre-hint of" <<
                " block_index=" << idx);
}
//! Cancel hints that are no longer needed and hand their blocks back to
//! the prefetch pool. rebuild_hints_prepare() must be called before!
void rebuild_hints_cancel()
{
    size_t i = m_unhinted_block;
    while (i < m_old_unhinted_block)
    {
        STXXL_DEBUG("ea[" << this << "]: discarding prefetch hint on"
                    " block " << i);
        // abort the outstanding read and wait for it to settle
        m_requests[i]->cancel();
        m_requests[i]->wait();
        // put block back into pool
        m_pool->add_prefetch(m_blocks[i]);
        // invalidate block entry and drop the request reference
        m_blocks[i] = NULL;
        m_requests[i] = request_ptr();
        ++i;
    }
}
//! Perform the real hinting of all pre-hinted blocks, now that canceled
//! blocks have been returned to the pool.
void rebuild_hints_finish()
{
    size_t i = m_old_unhinted_block;
    while (i < m_unhinted_block)
    {
        STXXL_DEBUG("ea[" << this << "]: perform real-hinting of"
                    " block " << i);
        assert(m_pool->size_write() > 0);
        assert(m_blocks[i] == NULL);
        // grab a free block from the pool and issue the prefetch read
        m_blocks[i] = m_pool->steal_prefetch();
        m_requests[i] = m_pool->read(m_blocks[i], m_bids[i]);
        ++i;
    }
}
//! \}
public:
//! \name Waiting and Removal
//! \{
//! Waits until the next prefetched block is read into RAM, then polls for
//! any further blocks that are done as well. Returns how many blocks were
//! successfully read.
unsigned_type wait_next_blocks()
{
size_t begin = get_end_block_index(), i = begin;
STXXL_DEBUG("ea[" << this << "]: waiting for" <<
" block index=" << i <<
" end_index=" << m_end_index);
assert(has_em_data());
assert(i < m_unhinted_block);
assert(m_bids[i].valid());
assert(m_requests[i].valid());
// wait for prefetched request to finish.
m_requests[i]->wait();
assert(m_requests[i]->poll());
assert(m_blocks[i]);
update_block_pointers(i);
++i;
// poll further hinted blocks if already done
while (i < m_unhinted_block && m_requests[i]->poll())
{
STXXL_DEBUG("ea[" << this << "]: poll-ok for" <<
" block index=" << i <<
" end_index=" << m_end_index);
// poll() returned true, so wait() returns immediately
m_requests[i]->wait();
assert(m_requests[i]->poll());
assert(m_blocks[i]);
update_block_pointers(i);
++i;
}
// advance the accessible range; the min() caps the last, possibly
// partially filled block at m_capacity
m_end_index = std::min(m_capacity, i * (external_size_type)block_items);
return i - begin;
}
//! Waits until every currently hinted block has arrived in RAM. Returns
//! the number of blocks that were read.
unsigned_type wait_all_hinted_blocks()
{
    const size_t begin = get_end_block_index();
    size_t i;
    for (i = begin; i < m_unhinted_block; ++i)
    {
        STXXL_DEBUG("wait_all_hinted_blocks(): ea[" << this << "]: waiting for" <<
                    " block index=" << i <<
                    " end_index=" << m_end_index);
        // block until the outstanding prefetch request completes
        m_requests[i]->wait();
        assert(m_requests[i]->poll());
        assert(m_blocks[i]);
        update_block_pointers(i);
    }
    // extend the accessible range, capped at m_capacity for the last block
    m_end_index = std::min(m_capacity, i * (external_size_type)block_items);
    return i - begin;
}
//! Returns the number of blocks loaded in RAM.
size_t num_used_blocks() const
{
// blocks in [block containing m_index, end of loaded range)
return get_end_block_index() - (m_index / block_items);
}
//! Removes the first n elements from the array. Returns the number of
//! blocks released into the block pool.
//! \param n number of items to remove; they must already be loaded
//!          (m_index + n <= m_end_index).
unsigned_type remove_items(size_t n)
{
assert(m_index + n <= m_capacity);
assert(m_index + n <= m_end_index);
assert(m_size >= n);
STXXL_DEBUG("ea[" << this << "]: remove " << n << " items");
if (n == 0)
return 0;
const size_t block_index = m_index / block_items;
const size_t index_after = m_index + n;
size_t block_index_after = index_after / block_items;
size_t local_index_after = index_after % block_items;
// if this drains the EA completely, also free the trailing partially
// consumed block (its remaining slots can never be read again)
if (m_size == n && local_index_after != 0) // end of EA
++block_index_after;
assert(block_index_after <= m_num_blocks);
bid_iterator i_begin = m_bids.begin() + block_index;
bid_iterator i_end = m_bids.begin() + block_index_after;
assert(i_begin <= i_end);
// release the EM space of all fully consumed blocks
block_manager::get_instance()->delete_blocks(i_begin, i_end);
for (size_t i = block_index; i < block_index_after; ++i) {
assert(block_valid(i));
// return block to pool
m_pool->add_prefetch(m_blocks[i]);
}
m_index = index_after;
m_size -= n;
unsigned_type blocks_freed = block_index_after - block_index;
STXXL_DEBUG("ea[" << this << "]: after remove:" <<
" index_after=" << index_after <<
" block_index_after=" << block_index_after <<
" local_index_after=" << local_index_after <<
" blocks_freed=" << blocks_freed <<
" num_blocks=" << m_num_blocks <<
" capacity=" << m_capacity);
assert(block_index_after <= m_num_blocks);
// at most one block outside of the currently loaded range
assert(block_index_after <= get_end_block_index());
return blocks_freed;
}
//! \}
protected:
//! Returns true if the block with the given index is completely fetched.
bool block_valid(size_t block_index) const
{
    // during the write phase a block is valid iff it is resident in RAM
    if (m_write_phase)
        return (bool)m_blocks[block_index];

    // in read mode the block must lie inside the array and its prefetch
    // request must have completed
    if (block_index >= m_num_blocks)
        return false;
    return (m_requests[block_index] && m_requests[block_index]->poll());
}
//! Refreshes the m_block_pointers entry for the given block.
//! Must be called after any steal() or read() operation so that the
//! iterators see valid begin/end pointers.
inline void update_block_pointers(size_t block_index)
{
    STXXL_DEBUG("ea[" << this << "]: updating block pointers for " << block_index);

    const bool is_last_block = (block_index + 1 == m_num_blocks);

    m_block_pointers[block_index].first = m_blocks[block_index]->begin();
    // the last block may be partially filled: end at capacity, not at the
    // physical block end
    m_block_pointers[block_index].second = is_last_block
        ? m_block_pointers[block_index].first
          + (m_capacity - block_index * block_items)
        : m_blocks[block_index]->end();

    assert(m_block_pointers[block_index].first != NULL);
    assert(m_block_pointers[block_index].second != NULL);
}
inline size_t last_block_items()
{
size_t mod = m_capacity % block_items;
return (mod > 0) ? mod : (size_t)block_items;
}
};
/**
* An external_array can only be written using an external_array_writer
* object. The writer objects provides iterators which are designed to be used
* by stxxl::parallel::multiway_merge() to write the external memory blocks in
* parallel. Thus in the writer we coordinate thread-safe access to the blocks
* using reference counting.
*
* An external_array_writer::iterator has two states: normal and "live". In
* normal mode, the iterator only has a valid index into the external array's
* items. In normal mode, only index calculations are possible. Once
* operator*() is called, the iterators goes into "live" mode by requesting
* access to the corresponding block. Using reference counting the blocks is
* written once all iterators are finished with the corresponding block. Since
* with operator*() we cannot know if the value is going to be written or read,
* when going to live mode, the block must be read from EM. This read overhead,
* however, is optimized by marking blocks as uninitialized in external_array,
* and skipping reads for them. In a full performance build, no block needs to
* be read from disk. Reads only occur in debug mode, when the results are
* verified.
*
* The iterator's normal/live mode only stays active for the individual
* iterator object. When an iterator is copied, assigned, or produced by
* iterator arithmetic, the mode is NOT inherited! The exception is prefix
* operator ++, which is used by
* multiway_merge() to fill an array. Thus the implementation of the iterator
* heavily depends on the behavior of multiway_merge() and is optimized for it.
*/
template <class ExternalArrayType>
class external_array_writer : public noncopyable
{
public:
typedef ExternalArrayType ea_type;
typedef external_array_writer self_type;
typedef typename ea_type::value_type value_type;
typedef typename ea_type::block_type block_type;
//! prototype declaration of nested class.
class iterator;
//! scope based debug variable
static const bool debug = false;
protected:
//! reference to the external array to be written
ea_type& m_ea;
#ifndef NDEBUG
//! total number of iterators referencing this writer
unsigned int m_ref_total;
#endif
//! reference counters for the number of live iterators on the
//! corresponding block in external_array.
std::vector<unsigned int> m_ref_count;
//! mutex for reference counting array (this is actually nicer than
//! openmp's critical)
mutex m_mutex;
//! optimization: hold live iterators for the expected boundary blocks of
//! multiway_merge().
std::vector<iterator> m_live_boundary;
protected:
//! read block into memory and increase reference count (called when an
//! iterator goes live on the block). The first reference triggers
//! external_array::read_block(), which skips the disk read for blocks
//! still marked uninitialized. Thread-safe via m_mutex.
block_type * get_block_ref(size_t block_index)
{
scoped_mutex_lock lock(m_mutex);
assert(block_index < m_ea.num_blocks());
unsigned int ref = m_ref_count[block_index]++;
#ifndef NDEBUG
++m_ref_total;
#endif
if (ref == 0) {
STXXL_DEBUG("get_block_ref block_index=" << block_index <<
" ref=" << ref << " reading.");
m_ea.read_block(block_index);
}
else {
STXXL_DEBUG("get_block_ref block_index=" << block_index <<
" ref=" << ref);
}
// pointer stays valid until the matching free_block_ref() drops the
// last reference (write_block() then NULLs the slot)
return m_ea.m_blocks[block_index];
}
//! decrease reference count on the block, and possibly write it to disk
//! (called when an iterator releases live mode). The last reference
//! triggers external_array::write_block(), which flushes the block and
//! releases its RAM. Thread-safe via m_mutex.
void free_block_ref(size_t block_index)
{
scoped_mutex_lock lock(m_mutex);
assert(block_index < m_ea.num_blocks());
#ifndef NDEBUG
assert(m_ref_total > 0);
--m_ref_total;
#endif
unsigned int ref = --m_ref_count[block_index];
if (ref == 0) {
STXXL_DEBUG("free_block_ref block_index=" << block_index <<
" ref=" << ref << " written.");
m_ea.write_block(block_index);
}
else {
STXXL_DEBUG("free_block_ref block_index=" << block_index <<
" ref=" << ref);
}
}
//! allow access to the block_ref functions
friend class iterator;
public:
/**
* An iterator which can be used to write (and read) an external_array via
* an external_array_writer. See the documentation of external_array_writer.
*/
class iterator
{
public:
typedef external_array_writer writer_type;
typedef ExternalArrayType ea_type;
typedef typename ea_type::value_type value_type;
typedef value_type& reference;
typedef value_type* pointer;
typedef ptrdiff_t difference_type;
typedef std::random_access_iterator_tag iterator_category;
typedef iterator self_type;
static const size_t block_items = ea_type::block_items;
//! scope based debug variable
static const bool debug = false;
protected:
//! pointer to the external array containing the elements
writer_type* m_writer;
//! when operator* or operator-> are called, then the iterator goes
//! live and allocates a reference to the block's data (possibly
//! reading it from EM).
bool m_live;
//! index of the current element, absolute in the external array
external_size_type m_index;
//! index of the current element's block in the external array's block
//! list. undefined while m_live is false.
internal_size_type m_block_index;
//! pointer to the referenced block. undefined while m_live is false.
block_type* m_block;
//! pointer to the current element inside the referenced block.
//! undefined while m_live is false.
internal_size_type m_current;
public:
//! default constructor (should not be used directly)
iterator()
: m_writer(NULL), m_live(false), m_index(0)
{ }
//! construct a new iterator
iterator(writer_type* writer, external_size_type index)
: m_writer(writer),
m_live(false),
m_index(index)
{
STXXL_DEBUG("Construct iterator for index " << m_index);
}
//! copy an iterator, the new iterator is _not_ automatically live!
iterator(const iterator& other)
: m_writer(other.m_writer),
m_live(false),
m_index(other.m_index)
{
STXXL_DEBUG("Copy-Construct iterator for index " << m_index);
}
//! assign an iterator, the assigned iterator is not automatically live!
iterator& operator = (const iterator& other)
{
if (&other != this)
{
STXXL_DEBUG("Assign iterator to index " << other.m_index);
// release our own block reference before taking over the new index
if (m_live)
m_writer->free_block_ref(m_block_index);
m_writer = other.m_writer;
m_live = false;
m_index = other.m_index;
}
return *this;
}
//! destructor: releases the block reference if the iterator is live
~iterator()
{
if (!m_live) return; // no need for cleanup
m_writer->free_block_ref(m_block_index);
STXXL_DEBUG("Destruction of iterator for index " << m_index <<
" in block " << m_index / block_items);
}
//! return the current absolute index inside the external array.
external_size_type get_index() const
{
return m_index;
}
//! allocates a reference to the block's data (possibly reading it from
//! EM).
void make_live()
{
assert(!m_live);
// calculate block and index inside
m_block_index = m_index / block_items;
m_current = m_index % block_items;
STXXL_DEBUG("operator*() live request for index=" << m_index <<
" block_index=" << m_block_index <<
" m_current=" << m_current);
// get block reference
m_block = m_writer->get_block_ref(m_block_index);
m_live = true;
}
//! access the current item
reference operator * ()
{
if (UNLIKELY(!m_live))
make_live();
return (*m_block)[m_current];
}
//! access the current item
pointer operator -> ()
{
return &(operator * ());
}
//! prefix-increment operator
self_type& operator ++ ()
{
++m_index;
if (UNLIKELY(!m_live)) return *this;
// if index stays in the same block, everything is fine
++m_current;
if (LIKELY(m_current != block_items)) return *this;
// release current block
m_writer->free_block_ref(m_block_index);
m_live = false;
return *this;
}
//! iterator arithmetic yields a fresh, non-live iterator
self_type operator + (difference_type addend) const
{
return self_type(m_writer, m_index + addend);
}
self_type operator - (difference_type subtrahend) const
{
return self_type(m_writer, m_index - subtrahend);
}
difference_type operator - (const self_type& o) const
{
return (m_index - o.m_index);
}
//! comparisons are on the absolute index only; live state is irrelevant
bool operator == (const self_type& o) const
{
return m_index == o.m_index;
}
bool operator != (const self_type& o) const
{
return m_index != o.m_index;
}
bool operator < (const self_type& o) const
{
return m_index < o.m_index;
}
bool operator <= (const self_type& o) const
{
return m_index <= o.m_index;
}
bool operator > (const self_type& o) const
{
return m_index > o.m_index;
}
bool operator >= (const self_type& o) const
{
return m_index >= o.m_index;
}
};
public:
//! Construct a writer for the given external array.
//! \param ea          external array to be filled
//! \param num_threads number of writer threads; 0 = auto-detect
//!                    (omp_get_max_threads() with STXXL_PARALLEL, else 1)
external_array_writer(ea_type& ea, unsigned int num_threads = 0)
: m_ea(ea),
m_ref_count(ea.num_blocks(), 0)
{
#ifndef NDEBUG
m_ref_total = 0;
#endif
#if STXXL_PARALLEL
if (num_threads == 0)
num_threads = omp_get_max_threads();
#else
if (num_threads == 0)
num_threads = 1;
#endif
m_ea.prepare_write(num_threads);
// optimization: hold live iterators for the boundary blocks which two
// threads write to. this prohibits the blocks to be written to disk
// and read again.
double step = (double)m_ea.capacity() / (double)num_threads;
m_live_boundary.resize(num_threads - 1);
for (unsigned int i = 0; i < num_threads - 1; ++i)
{
external_size_type index = (external_size_type)((i + 1) * step);
STXXL_DEBUG("hold index " << index <<
" in block " << index / ea_type::block_items);
m_live_boundary[i] = iterator(this, index);
m_live_boundary[i].make_live();
}
}
//! Destructor: releases the boundary iterators (flushing their blocks)
//! and finishes the external array's write phase. In debug builds,
//! verifies that no iterator reference is still outstanding.
~external_array_writer()
{
m_live_boundary.clear(); // release block boundaries
#ifndef NDEBUG
STXXL_ASSERT(m_ref_total == 0);
#endif
m_ea.finish_write();
}
//! iterator to the first item slot of the external array
iterator begin()
{
return iterator(this, 0);
}
//! iterator one past the last item slot of the external array
iterator end()
{
return iterator(this, m_ea.capacity());
}
};
/*!
* The minima_tree contains minima from all sources inside the PPQ. It contains
* winner trees for the insertion heaps and for the internal arrays, each
* holding the minima of all currently allocated structures. These two
* sources, plus the extract (deletion) buffer, are combined using a "head"
* inner tree containing only up to three items.
*/
template <class ParentType>
class minima_tree
{
public:
typedef ParentType parent_type;
typedef minima_tree<ParentType> self_type;
typedef typename parent_type::inv_compare_type compare_type;
typedef typename parent_type::value_type value_type;
typedef typename parent_type::proc_vector_type proc_vector_type;
typedef typename parent_type::internal_arrays_type ias_type;
typedef typename parent_type::external_arrays_type eas_type;
static const unsigned initial_ia_size = 2;
static const unsigned initial_ea_size = 2;
protected:
//! WinnerTree-Comparator for the head winner tree. It accesses all
//! relevant data structures from the priority queue.
struct head_comp
{
self_type& m_parent;
proc_vector_type& m_proc;
ias_type& m_ias;
const compare_type& m_compare;
head_comp(self_type& parent, proc_vector_type& proc,
ias_type& ias, const compare_type& compare)
: m_parent(parent),
m_proc(proc),
m_ias(ias),
m_compare(compare)
{ }
//! Returns the current minimum of the given source (HEAP, IA or EB).
const value_type & get_value(int input) const
{
switch (input) {
case HEAP:
// root of the currently winning insertion heap
return m_proc[m_parent.m_heaps.top()]->insertion_heap[0];
case IA:
// minimum of the currently winning internal array
return m_ias[m_parent.m_ia.top()].get_min();
case EB:
// current head item of the extract buffer
return m_parent.m_parent.m_extract_buffer[
m_parent.m_parent.m_extract_buffer_index
];
default:
// invalid player index: must never be queried
abort();
}
}
bool operator () (const int a, const int b) const
{
return m_compare(get_value(a), get_value(b));
}
};
//! Comparator for the insertion heaps winner tree.
struct heaps_comp
{
proc_vector_type& m_proc;
const compare_type& m_compare;
heaps_comp(proc_vector_type& proc, const compare_type& compare)
: m_proc(proc), m_compare(compare)
{ }
//! Root (smallest item) of the insertion heap of processor `index`.
const value_type & get_value(int index) const
{
return m_proc[index]->insertion_heap[0];
}
bool operator () (const int a, const int b) const
{
return m_compare(get_value(a), get_value(b));
}
};
//! Comparator for the internal arrays winner tree.
struct ia_comp
{
ias_type& m_ias;
const compare_type& m_compare;
ia_comp(ias_type& ias, const compare_type& compare)
: m_ias(ias), m_compare(compare)
{ }
bool operator () (const int a, const int b) const
{
return m_compare(m_ias[a].get_min(), m_ias[b].get_min());
}
};
protected:
//! The priority queue
parent_type& m_parent;
//! value_type comparator
const compare_type& m_compare;
//! Comparator instances
head_comp m_head_comp;
heaps_comp m_heaps_comp;
ia_comp m_ia_comp;
//! The winner trees
winner_tree<head_comp> m_head;
winner_tree<heaps_comp> m_heaps;
winner_tree<ia_comp> m_ia;
public:
//! Entries in the head winner tree. The head tree is constructed with
//! three players (HEAP, IA, EB); TYPE_ERROR is only a sentinel returned
//! by top() when no valid player wins.
enum Types {
HEAP = 0,
IA = 1,
EB = 2,
TYPE_ERROR = 3
};
//! Construct the tree of minima sources.
minima_tree(parent_type& parent)
: m_parent(parent),
m_compare(parent.m_inv_compare),
// construct comparators
m_head_comp(*this, parent.m_proc,
parent.m_internal_arrays, m_compare),
m_heaps_comp(parent.m_proc, m_compare),
m_ia_comp(parent.m_internal_arrays, m_compare),
// construct header winner tree
m_head(3, m_head_comp),
m_heaps(m_parent.m_num_insertion_heaps, m_heaps_comp),
m_ia(initial_ia_size, m_ia_comp)
{ }
//! Return smallest items of head winner tree.
//! \return pair (source type from enum Types, index within that source)
std::pair<unsigned, unsigned> top()
{
unsigned type = m_head.top();
switch (type)
{
case HEAP:
return std::make_pair(HEAP, m_heaps.top());
case IA:
return std::make_pair(IA, m_ia.top());
case EB:
// the extract buffer is a single source, index is unused
return std::make_pair(EB, 0);
default:
return std::make_pair(TYPE_ERROR, 0);
}
}
//! Update minima tree after an item from the heap index was removed.
void update_heap(int_type index)
{
m_heaps.notify_change(index);
m_head.notify_change(HEAP);
}
//! Update minima tree after an item of the extract buffer was removed.
void update_extract_buffer()
{
m_head.notify_change(EB);
}
//! Update minima tree after an item from an internal array was removed.
void update_internal_array(unsigned index)
{
m_ia.notify_change(index);
m_head.notify_change(IA);
}
//! Add a newly created internal array to the minima tree.
void add_internal_array(unsigned index)
{
m_ia.activate_player(index);
m_head.notify_change(IA);
}
//! Remove an insertion heap from the minima tree.
void deactivate_heap(unsigned index)
{
m_heaps.deactivate_player(index);
// if no heap remains, remove the HEAP player from the head tree, too
if (!m_heaps.empty())
m_head.notify_change(HEAP);
else
m_head.deactivate_player(HEAP);
}
//! Remove the extract buffer from the minima tree.
void deactivate_extract_buffer()
{
m_head.deactivate_player(EB);
}
//! Remove an internal array from the minima tree.
void deactivate_internal_array(unsigned index)
{
m_ia.deactivate_player(index);
// if no internal array remains, remove the IA player as well
if (!m_ia.empty())
m_head.notify_change(IA);
else
m_head.deactivate_player(IA);
}
//! Remove all insertion heaps from the minima tree.
void clear_heaps()
{
m_heaps.clear();
m_head.deactivate_player(HEAP);
}
//! Remove all internal arrays from the minima tree.
void clear_internal_arrays()
{
m_ia.resize_and_clear(initial_ia_size);
m_head.deactivate_player(IA);
}
//! Rebuild the internal-arrays winner tree to match the parent's current
//! set of internal arrays (e.g. after arrays were merged or removed).
void rebuild_internal_arrays()
{
if (!m_parent.m_internal_arrays.empty())
{
m_ia.resize_and_rebuild(m_parent.m_internal_arrays.size());
m_head.notify_change(IA);
}
else
{
m_head.deactivate_player(IA);
}
}
//! Return size of internal arrays minima tree
size_t ia_slots() const
{
return m_ia.num_slots();
}
//! Returns a readable representation of the winner tree as string.
std::string to_string() const
{
std::ostringstream ss;
ss << "Head:" << std::endl << m_head.to_string() << std::endl;
ss << "Heaps:" << std::endl << m_heaps.to_string() << std::endl;
ss << "IA:" << std::endl << m_ia.to_string() << std::endl;
return ss.str();
}
//! Prints statistical data.
void print_stats() const
{
STXXL_MSG("Head winner tree stats:");
m_head.print_stats();
STXXL_MSG("Heaps winner tree stats:");
m_heaps.print_stats();
STXXL_MSG("IA winner tree stats:");
m_ia.print_stats();
}
};
} // namespace ppq_local
/*!
* Parallelized External Memory Priority Queue.
*
* \tparam ValueType Type of the contained objects (POD with no references to
* internal memory).
*
* \tparam CompareType The comparator type used to determine whether one
* element is smaller than another element.
*
* \tparam DefaultMemSize Maximum memory consumption by the queue. Can be
* overwritten by the constructor. Default = 1 GiB.
*
* \tparam MaxItems Maximum number of elements the queue contains at one
* time. Default = 0 = unlimited. This is no hard limit and only used for
* optimization. Can be overwritten by the constructor.
*
* \tparam BlockSize External block size. Default =
* STXXL_DEFAULT_BLOCK_SIZE(ValueType).
*
* \tparam AllocStrategy Allocation strategy for the external memory. Default =
* STXXL_DEFAULT_ALLOC_STRATEGY.
*/
template <
class ValueType,
class CompareType = std::less<ValueType>,
class AllocStrategy = STXXL_DEFAULT_ALLOC_STRATEGY,
uint64 BlockSize = STXXL_DEFAULT_BLOCK_SIZE(ValueType),
uint64 DefaultMemSize = 1* 1024L* 1024L* 1024L,
uint64 MaxItems = 0
>
class parallel_priority_queue : private noncopyable
{
//! \name Types
//! \{
public:
typedef ValueType value_type;
typedef CompareType compare_type;
typedef AllocStrategy alloc_strategy;
static const uint64 block_size = BlockSize;
typedef uint64 size_type;
typedef typed_block<block_size, value_type> block_type;
typedef std::vector<BID<block_size> > bid_vector;
typedef bid_vector bids_container_type;
typedef read_write_pool<block_type> pool_type;
typedef ppq_local::internal_array<value_type> internal_array_type;
typedef ppq_local::external_array<value_type, block_size, AllocStrategy> external_array_type;
typedef typename external_array_type::writer_type external_array_writer_type;
typedef typename std::vector<value_type>::iterator value_iterator;
typedef typename internal_array_type::iterator iterator;
typedef std::pair<iterator, iterator> iterator_pair_type;
static const bool debug = false;
//! currently global public tuning parameter:
unsigned_type c_max_internal_level_size;
//! currently global public tuning parameter:
unsigned_type c_max_external_level_size;
protected:
//! type of insertion heap itself
typedef std::vector<value_type> heap_type;
//! type of internal arrays vector
typedef typename stxxl::swap_vector<internal_array_type> internal_arrays_type;
//! type of external arrays vector
typedef typename stxxl::swap_vector<external_array_type> external_arrays_type;
//! type of minima tree combining the structures
typedef ppq_local::minima_tree<
parallel_priority_queue<value_type, compare_type, alloc_strategy,
block_size, DefaultMemSize, MaxItems> > minima_type;
//! allow minima tree access to internal data structures
friend class ppq_local::minima_tree<
parallel_priority_queue<value_type, compare_type, alloc_strategy,
block_size, DefaultMemSize, MaxItems> >;
//! Inverse comparison functor: swaps the arguments of the user-supplied
//! <-comparator, yielding a >-comparator (see the m_compare/m_inv_compare
//! members below). Stores only a reference to the original comparator.
struct inv_compare_type
{
const compare_type& compare;
inv_compare_type(const compare_type& c)
: compare(c)
{ }
bool operator () (const value_type& x, const value_type& y) const
{
// x "greater" y in the original order <=> compare(y, x)
return compare(y, x);
}
};
//! <-Comparator for value_type
compare_type m_compare;
//! >-Comparator for value_type
inv_compare_type m_inv_compare;
//! Defines if statistics are gathered: dummy_custom_stats_counter or
//! custom_stats_counter
typedef dummy_custom_stats_counter<uint64> stats_counter;
//! Defines if statistics are gathered: fake_timer or timer
typedef fake_timer stats_timer;
//! \}
//! \name Compile-Time Parameters
//! \{
//! Merge sorted heaps when flushing into an internal array.
//! Pro: Reduces the risk of a large winner tree
//! Con: Flush insertion heaps becomes slower.
static const bool c_merge_sorted_heaps = true;
//! Default number of write buffer block for a new external array being
//! filled.
static const unsigned c_num_write_buffer_blocks = 14;
//! Defines for how much external arrays memory should be reserved in the
//! constructor.
static const unsigned c_num_reserved_external_arrays = 10;
//! Size of a single insertion heap in Byte, if not defined otherwise in
//! the constructor. Default: 1 MiB
static const size_type c_default_single_heap_ram = 1L * 1024L * 1024L;
//! Default limit of the extract buffer ram consumption as share of total
//! ram
// C++11: constexpr static double c_default_extract_buffer_ram_part = 0.05;
// C++98 does not allow static const double initialization here.
// It's located in global scope instead.
static const double c_default_extract_buffer_ram_part;
/*!
* Limit the size of the extract buffer to an absolute value.
*
* The actual size can be set using the extract_buffer_ram parameter of the
* constructor. If this parameter is not set, the value is calculated by
* (total_ram*c_default_extract_buffer_ram_part)
*
* If c_limit_extract_buffer==false, the memory consumption of the extract
* buffer is only limited by the number of external and internal
* arrays. This is considered in memory management using the
* ram_per_external_array and ram_per_internal_array values. Attention:
* Each internal array reserves space for the extract buffer in the size of
* all heaps together.
*/
static const bool c_limit_extract_buffer = true;
//! For bulks of size up to c_single_insert_limit sequential single insert
//! is faster than bulk_push.
static const unsigned c_single_insert_limit = 100;
//! \}
//! \name Parameters and Sizes for Memory Allocation Policy
//! Number of insertion heaps. Usually equal to the number of CPUs.
const long m_num_insertion_heaps;
//! Capacity of one inserion heap
const unsigned m_insertion_heap_capacity;
//! Return size of insertion heap reservation in bytes
//! (heap capacity in items times the size of one item).
size_type insertion_heap_int_memory() const
{
return m_insertion_heap_capacity * sizeof(value_type);
}
//! Total amount of internal memory
const size_type m_mem_total;
//! Maximum size of extract buffer in number of elements
//! Only relevant if c_limit_extract_buffer==true
size_type m_extract_buffer_limit;
//! Size of all insertion heaps together in bytes
const size_type m_mem_for_heaps;
//! Number of read/prefetch blocks per external array.
const float m_num_read_blocks_per_ea;
//! Total number of read/prefetch buffer blocks
unsigned_type m_num_read_blocks;
//! number of currently hinted prefetch blocks
unsigned_type m_num_hinted_blocks;
//! number of currently loaded blocks
unsigned_type m_num_used_read_blocks;
//! Free memory in bytes
size_type m_mem_left;
//! \}
//! Flag if inside a bulk_push sequence.
bool m_in_bulk_push;
//! If the bulk currently being inserted is very large, this boolean is set
//! and bulk_push just accumulate the elements for eventual sorting.
bool m_is_very_large_bulk;
//! First index in m_external_arrays that was not re-hinted during a
//! bulk_push sequence.
unsigned_type m_bulk_first_delayed_external_array;
//! Index of the currently smallest element in the extract buffer
size_type m_extract_buffer_index;
//! \name Number of elements currently in the data structures
//! \{
//! Number of elements int the insertion heaps
size_type m_heaps_size;
//! Number of elements in the extract buffer
size_type m_extract_buffer_size;
//! Number of elements in the internal arrays
size_type m_internal_size;
//! Number of elements in the external arrays
size_type m_external_size;
//! \}
//! \name Data Holding Structures
//! \{
//! A struct containing the local insertion heap and other information
//! _local_ to a processor. One instance exists per processor (see m_proc).
//! NOTE(review): instances are heap-allocated individually; whether they
//! are padded against false sharing is not visible here -- TODO confirm.
struct ProcessorData
{
//! The heaps where new elements are usually inserted into
heap_type insertion_heap;
//! The number of items inserted into the insheap during bulk parallel
//! access.
size_type heap_add_size;
};
typedef std::vector<ProcessorData*> proc_vector_type;
//! Array of processor local data structures, including the insertion
//! heaps. Held by pointer so each entry can be allocated by its own
//! thread in the constructor (first-touch placement -- see ctor loop).
proc_vector_type m_proc;
//! Prefetch and write buffer pool for external arrays (has to be in front
//! of m_external_arrays so it is constructed first / destroyed last)
pool_type m_pool;
//! The extract buffer where external (and internal) arrays are merged into
//! for extracting
std::vector<value_type> m_extract_buffer;
//! The sorted arrays in internal memory
internal_arrays_type m_internal_arrays;
//! The sorted arrays in external memory
external_arrays_type m_external_arrays;
//! The aggregated pushes (see aggregate_push()). They cannot be
//! extracted yet.
std::vector<value_type> m_aggregated_pushes;
//! The maximum number of internal array levels.
static const unsigned_type c_max_internal_levels = 8;
//! The number of internal arrays on each level, we use plain array.
unsigned_type m_internal_levels[c_max_internal_levels];
//! The maximum number of external array levels.
static const unsigned_type c_max_external_levels = 8;
//! The number of external arrays on each level, we use plain array.
unsigned_type m_external_levels[c_max_external_levels];
//! The winner tree containing the smallest values of all sources
//! where the globally smallest element could come from.
minima_type m_minima;
//! Compares the largest accessible value of two external arrays.
//! Functor for the winner tree below; the arguments are indices into
//! m_external_arrays.
struct external_min_comparator {
const external_arrays_type& m_eas;
const inv_compare_type& m_compare;
external_min_comparator(const external_arrays_type& eas,
const inv_compare_type& compare)
: m_eas(eas), m_compare(compare) { }
bool operator () (const size_t& a, const size_t& b) const
{
return m_compare(m_eas[a].get_next_block_min(),
m_eas[b].get_next_block_min());
}
} m_external_min_comparator;
//! Tracks the largest accessible values of the external arrays if there
//! is unaccessible data in EM. The winning array is the first one that
//! needs to fetch further data from EM. Used in calculate_merge_sequences.
winner_tree<external_min_comparator> m_external_min_tree;
//! Compares the largest value of the block hinted the latest of two
//! external arrays. Functor for the hint tree below; the arguments are
//! indices into m_external_arrays.
struct hint_comparator {
const external_arrays_type& m_eas;
const inv_compare_type& m_compare;
hint_comparator(const external_arrays_type& eas,
const inv_compare_type& compare)
: m_eas(eas), m_compare(compare) { }
bool operator () (const size_t& a, const size_t& b) const
{
return m_compare(m_eas[a].get_next_hintable_min(),
m_eas[b].get_next_hintable_min());
}
} m_hint_comparator;
//! Tracks the largest values of the block hinted the latest of the
//! external arrays if there is unaccessible data in EM. The winning
//! array is the first one that needs to fetch further data from EM.
//! Used for prefetch hints.
winner_tree<hint_comparator> m_hint_tree;
//! Random number generator for randomly selecting a heap in sequential
//! push()
random_number32_r m_rng;
//! \}
/*
 * Helper functors to remove empty internal/external arrays
 * (used with swap_remove_if in the cleanup methods below).
 */
//! Unary operator which returns true if the external array has run empty.
struct empty_external_array_eraser {
bool operator () (external_array_type& a) const
{ return a.empty(); }
};
//! Unary operator which returns true if the internal array has run empty.
struct empty_internal_array_eraser {
bool operator () (internal_array_type& a) const
{ return a.empty(); }
};
//! Clean up empty internal arrays, free their memory and capacity
void cleanup_internal_arrays()
{
typename internal_arrays_type::iterator swap_end =
stxxl::swap_remove_if(m_internal_arrays.begin(),
m_internal_arrays.end(),
empty_internal_array_eraser());
for (typename internal_arrays_type::iterator ia = swap_end;
ia != m_internal_arrays.end(); ++ia)
{
m_mem_left += ia->int_memory();
--m_internal_levels[ia->level()];
}
if (swap_end != m_internal_arrays.end())
STXXL_DEBUG0("cleanup_internal_arrays" <<
" cleaned=" << m_internal_arrays.end() - swap_end);
m_internal_arrays.erase(swap_end, m_internal_arrays.end());
m_minima.rebuild_internal_arrays();
}
//! Clean up empty external arrays, free their memory and capacity.
//! Also keeps m_external_min_tree and m_hint_tree consistent with the
//! compacted array order, and shrinks the read/prefetch pool.
void cleanup_external_arrays()
{
typedef typename external_arrays_type::iterator ea_iterator;
empty_external_array_eraser pred;
// The following is a modified implementation of swap_remove_if().
// Updates m_external_min_tree accordingly.
ea_iterator first = m_external_arrays.begin();
ea_iterator last = m_external_arrays.end();
ea_iterator swap_end = first;
size_t size = m_external_arrays.end() - m_external_arrays.begin();
// first_removed == size encodes "nothing removed so far"
size_t first_removed = size;
while (first != last)
{
if (!pred(*first))
{
using std::swap;
swap(*first, *swap_end);
++swap_end;
}
else if (first_removed >= size)
{
// remember index of the first empty array: every player from
// here onward may have moved and must be fixed up below
first_removed = first - m_external_arrays.begin();
}
++first;
}
// subtract memory of EAs, which will be freed
for (ea_iterator ea = swap_end; ea != last; ++ea) {
m_mem_left += ea->int_memory();
--m_external_levels[ea->level()];
}
size_t swap_end_index = swap_end - m_external_arrays.begin();
// Deactivating all affected players first.
// Otherwise there might be outdated comparisons.
for (size_t i = size; i != first_removed; ) {
--i;
m_external_min_tree.deactivate_player_step(i);
// TODO delay if (m_in_bulk_push)?
m_hint_tree.deactivate_player_step(i);
}
// Replay moved arrays (indices [first_removed, swap_end_index) now
// hold arrays swapped into a new position).
for (size_t i = first_removed; i < swap_end_index; ++i) {
update_external_min_tree(i);
// TODO delay if (m_in_bulk_push)?
update_hint_tree(i);
}
STXXL_DEBUG("Removed " << m_external_arrays.end() - swap_end <<
" empty external arrays.");
m_external_arrays.erase(swap_end, m_external_arrays.end());
resize_read_pool(); // shrinks read/prefetch pool
}
/*!
 * SiftUp a new element from the last position in the heap, reestablishing
 * the heap invariant. This is identical to std::push_heap, except that it
 * returns the final index of the sifted element, so the caller can detect
 * whether the minimum may have changed (returned index == 0).
 */
template <typename RandomAccessIterator, typename HeapCompareType>
static inline unsigned_type
push_heap(RandomAccessIterator first, RandomAccessIterator last,
          HeapCompareType comp)
{
    typedef typename std::iterator_traits<RandomAccessIterator>::value_type
        value_type;

    // take the new element out, leaving a hole at the last position
    value_type new_item = STXXL_MOVE(*(last - 1));
    unsigned_type hole = (last - first) - 1;

    // move the hole upwards while the parent compares less than the item
    while (hole > 0)
    {
        const unsigned_type parent = (hole - 1) / 2;
        if (!comp(*(first + parent), new_item))
            break;
        *(first + hole) = STXXL_MOVE(*(first + parent));
        hole = parent;
    }

    *(first + hole) = STXXL_MOVE(new_item);
    return hole;
}
public:
//! \name Initialization
//! \{
/*!
* Constructor.
*
* \param compare Comparator for priority queue, which is a Max-PQ.
*
* \param total_ram Maximum RAM usage. 0 = Default = Use the template
* value DefaultMemSize.
*
* \param num_read_blocks_per_ea Number of read blocks per external
* array. Default = 1.5f
*
* \param num_write_buffer_blocks Number of write buffer blocks for a new
* external array being filled. 0 = Default = c_num_write_buffer_blocks
*
* \param num_insertion_heaps Number of insertion heaps. 0 = Default =
* Determine by omp_get_max_threads().
*
* \param single_heap_ram Memory usage for a single insertion heap.
* Default = c_single_heap_ram.
*
* \param extract_buffer_ram Memory usage for the extract buffer. Only
* relevant if c_limit_extract_buffer==true. 0 = Default = total_ram *
* c_default_extract_buffer_ram_part.
*/
parallel_priority_queue(
const compare_type& compare = compare_type(),
size_type total_ram = DefaultMemSize,
float num_read_blocks_per_ea = 1.5f,
unsigned_type num_write_buffer_blocks = c_num_write_buffer_blocks,
unsigned_type num_insertion_heaps = 0,
size_type single_heap_ram = c_default_single_heap_ram,
size_type extract_buffer_ram = 0)
: c_max_internal_level_size(64),
c_max_external_level_size(64),
m_compare(compare),
m_inv_compare(m_compare),
// Parameters and Sizes for Memory Allocation Policy
#if STXXL_PARALLEL
m_num_insertion_heaps(num_insertion_heaps > 0 ? num_insertion_heaps : omp_get_max_threads()),
#else
m_num_insertion_heaps(num_insertion_heaps > 0 ? num_insertion_heaps : 1),
#endif
m_insertion_heap_capacity(single_heap_ram / sizeof(value_type)),
m_mem_total(total_ram),
m_mem_for_heaps(m_num_insertion_heaps * single_heap_ram),
m_num_read_blocks_per_ea(num_read_blocks_per_ea),
m_num_read_blocks(0),
m_num_hinted_blocks(0),
m_num_used_read_blocks(0),
// (unnamed)
m_in_bulk_push(false),
m_is_very_large_bulk(false),
m_extract_buffer_index(0),
// Number of elements currently in the data structures
m_heaps_size(0),
m_extract_buffer_size(0),
m_internal_size(0),
m_external_size(0),
// Data Holding Structures
m_proc(m_num_insertion_heaps),
m_pool(0, num_write_buffer_blocks),
m_external_arrays(),
m_minima(*this),
// the comparators reference m_external_arrays, so the winner trees
// must be initialized after it (declaration order in the class)
m_external_min_comparator(m_external_arrays, m_inv_compare),
m_external_min_tree(4, m_external_min_comparator),
m_hint_comparator(m_external_arrays, m_inv_compare),
m_hint_tree(4, m_hint_comparator),
// flags
m_limit_extract(false)
{
#if STXXL_PARALLEL
// bulk operations open parallel regions that may themselves run
// parallel merges, hence nested parallelism is mandatory
if (!omp_get_nested()) {
omp_set_nested(1);
if (!omp_get_nested()) {
STXXL_ERRMSG("Could not enable OpenMP's nested parallelism, "
"however, the PPQ requires this OpenMP feature.");
abort();
}
}
#else
STXXL_ERRMSG("You are using stxxl::parallel_priority_queue without "
"support for OpenMP parallelism.");
STXXL_ERRMSG("This is probably not what you want, so check the "
"compilation settings.");
#endif
if (c_limit_extract_buffer) {
// either the explicitly requested size, or a fixed fraction of
// the total memory budget
m_extract_buffer_limit = (extract_buffer_ram > 0)
? extract_buffer_ram / sizeof(value_type)
: static_cast<size_type>(((double)(m_mem_total) * c_default_extract_buffer_ram_part / sizeof(value_type)));
}
for (unsigned_type i = 0; i < c_max_internal_levels; ++i)
m_internal_levels[i] = 0;
for (unsigned_type i = 0; i < c_max_external_levels; ++i)
m_external_levels[i] = 0;
// TODO: Do we still need this line? Insertion heap memory is
// registered below. And merge buffer is equal to the new IA...
// total_ram - ram for the heaps - ram for the heap merger
m_mem_left = m_mem_total - 2 * m_mem_for_heaps;
// reserve insertion heap memory on processor-local memory
#if STXXL_PARALLEL
#pragma omp parallel for
#endif
for (long p = 0; p < m_num_insertion_heaps; ++p)
{
// allocation happens on the thread that will use the heap --
// presumably for first-touch NUMA placement; confirm intent
m_proc[p] = new ProcessorData;
m_proc[p]->insertion_heap.reserve(m_insertion_heap_capacity);
assert(m_proc[p]->insertion_heap.capacity() * sizeof(value_type)
== insertion_heap_int_memory());
}
m_mem_left -= m_num_insertion_heaps * insertion_heap_int_memory();
// prepare prefetch buffer pool (already done in initializer),
// initially zero.
// prepare write buffer pool: calculate size and subtract from mem_left
external_array_type::prepare_write_pool(m_pool, m_num_insertion_heaps);
m_mem_left -= m_pool.size_write() * block_size;
// prepare internal arrays
if (c_merge_sorted_heaps) {
m_internal_arrays.reserve(m_mem_total / m_mem_for_heaps);
}
else {
m_internal_arrays.reserve(m_mem_total * m_num_insertion_heaps / m_mem_for_heaps);
}
// prepare external arrays
m_external_arrays.reserve(c_num_reserved_external_arrays);
if (m_mem_total < m_mem_left) // checks if unsigned type wrapped.
{
STXXL_ERRMSG("Minimum memory requirement insufficient, "
"increase PPQ's memory limit or decrease buffers.");
abort();
}
check_invariants();
}
//! Destructor. Frees the processor-local data structures that the
//! constructor allocated.
~parallel_priority_queue()
{
    // release every per-processor data block (insertion heaps etc.)
    for (typename proc_vector_type::iterator it = m_proc.begin();
         it != m_proc.end(); ++it)
    {
        delete *it;
    }
}
protected:
//! Assert many invariants of the data structures: block accounting of
//! the prefetch/write pools, per-heap sizes, item counts and memory of
//! internal and external arrays, and the per-level counters.
void check_invariants() const
{
#ifdef NDEBUG
// disable in Release builds
return;
#endif
size_type mem_used = 0;
// fixed overhead: heap area (budgeted twice, see constructor), write
// pool, free prefetch blocks, hinted blocks and used read blocks
mem_used += 2 * m_mem_for_heaps
+ m_pool.size_write() * block_size
+ m_pool.free_size_prefetch() * block_size
+ m_num_hinted_blocks * block_size
+ m_num_used_read_blocks * block_size;
// count number of blocks hinted in prefetcher
size_t num_hinted = 0, num_used_read = 0;
for (size_t i = 0; i < m_external_arrays.size(); ++i) {
num_hinted += m_external_arrays[i].num_hinted_blocks();
num_used_read += m_external_arrays[i].num_used_blocks();
}
STXXL_CHECK(num_hinted == m_num_hinted_blocks);
STXXL_CHECK(num_used_read == m_num_used_read_blocks);
// every read block is either free in the pool, hinted, or in use
STXXL_CHECK_EQUAL(m_num_used_read_blocks,
m_num_read_blocks
- m_pool.free_size_prefetch()
- m_num_hinted_blocks);
// test the processor local data structures
size_type heaps_size = 0;
for (int_type p = 0; p < m_num_insertion_heaps; ++p)
{
// check that each insertion heap is a heap
// TODO: remove soon, because this is very expensive
// NOTE: the "1 ||" short-circuit deliberately disables the
// expensive is_heap scan while keeping the code compilable
STXXL_CHECK(1 || stxxl::is_heap(m_proc[p]->insertion_heap.begin(),
m_proc[p]->insertion_heap.end(),
m_compare));
STXXL_CHECK(m_proc[p]->insertion_heap.capacity() <= m_insertion_heap_capacity);
heaps_size += m_proc[p]->insertion_heap.size();
mem_used += m_proc[p]->insertion_heap.capacity() * sizeof(value_type);
}
// during a bulk push the per-heap counters lag behind m_heaps_size
if (!m_in_bulk_push)
STXXL_CHECK_EQUAL(m_heaps_size, heaps_size);
// count number of items and memory size of internal arrays
size_type ia_size = 0;
size_type ia_memory = 0;
std::vector<unsigned_type> ia_levels(c_max_internal_levels, 0);
for (typename internal_arrays_type::const_iterator ia =
m_internal_arrays.begin(); ia != m_internal_arrays.end(); ++ia)
{
ia_size += ia->size();
ia_memory += ia->int_memory();
++ia_levels[ia->level()];
}
STXXL_CHECK_EQUAL(m_internal_size, ia_size);
mem_used += ia_memory;
for (unsigned_type i = 0; i < c_max_internal_levels; ++i)
STXXL_CHECK_EQUAL(m_internal_levels[i], ia_levels[i]);
// count number of items in external arrays
size_type ea_size = 0;
size_type ea_memory = 0;
std::vector<unsigned_type> ea_levels(c_max_external_levels, 0);
for (typename external_arrays_type::const_iterator ea =
m_external_arrays.begin(); ea != m_external_arrays.end(); ++ea)
{
ea_size += ea->size();
ea_memory += ea->int_memory();
++ea_levels[ea->level()];
}
STXXL_CHECK_EQUAL(m_external_size, ea_size);
mem_used += ea_memory;
for (unsigned_type i = 0; i < c_max_external_levels; ++i)
STXXL_CHECK_EQUAL(m_external_levels[i], ea_levels[i]);
// calculate mem_used so that == mem_total - mem_left
STXXL_CHECK_EQUAL(memory_consumption(), mem_used);
}
//! \}
//! \name Properties
//! \{
public:
//! The number of elements in the queue: the sum over all four element
//! stores (insertion heaps, internal arrays, external arrays, and the
//! extract buffer).
inline size_type size() const
{
    const size_type in_ram =
        m_heaps_size + m_internal_size + m_extract_buffer_size;
    return in_ram + m_external_size;
}
//! Returns true if the queue contains no elements at all.
inline bool empty() const
{
    return size() == 0;
}
//! The memory consumption in Bytes: the part of the total budget that
//! is currently accounted as used.
inline size_type memory_consumption() const
{
    assert(m_mem_left <= m_mem_total);
    return m_mem_total - m_mem_left;
}
protected:
//! Returns true if the extract buffer currently holds no elements.
inline bool extract_buffer_empty() const
{
    return m_extract_buffer_size == 0;
}
//! \}
public:
//! \name Bulk Operations
//! \{
/*!
* Start a sequence of push operations.
* \param bulk_size Exact number of elements to push before the next pop.
*/
void bulk_push_begin(size_type bulk_size)
{
assert(!m_in_bulk_push);
m_in_bulk_push = true;
// remember how many EAs exist now: hints for EAs created during the
// bulk session are delayed until bulk_push_end()
m_bulk_first_delayed_external_array = m_external_arrays.size();
size_type heap_capacity = m_num_insertion_heaps * m_insertion_heap_capacity;
// if bulk_size is large: use simple aggregation instead of keeping the
// heap property and sort everything afterwards.
// NOTE(review): the "&& 0" deliberately disables the very-large-bulk
// mode -- looks experimental; confirm before re-enabling.
if (bulk_size > heap_capacity && 0) {
m_is_very_large_bulk = true;
}
else {
m_is_very_large_bulk = false;
if (bulk_size + m_heaps_size > heap_capacity) {
if (m_heaps_size > 0) {
//flush_insertion_heaps();
}
}
}
// zero bulk insertion counters
for (int_type p = 0; p < m_num_insertion_heaps; ++p)
m_proc[p]->heap_add_size = 0;
}
/*!
 * Push an element inside a sequence of pushes.
 * Run bulk_push_begin() before using this method.
 *
 * The pushed element is counted in the processor-local heap_add_size
 * and only merged into m_heaps_size when the local heap is flushed or
 * in bulk_push_end().
 *
 * \param element The element to push.
 * \param p The id of the insertion heap to use (usually the thread id).
 */
void bulk_push(const value_type& element, const unsigned_type p)
{
assert(m_in_bulk_push);
heap_type& insheap = m_proc[p]->insertion_heap;
// NOTE(review): the "&& 0" / "&& 1" factors select one of three
// insertion variants at compile time; only the second and third
// branches are currently reachable.
if (!m_is_very_large_bulk && 0)
{
// if small bulk: if heap is full -> sort locally and put into
// internal array list. insert items and keep heap invariant.
if (UNLIKELY(insheap.size() >= m_insertion_heap_capacity)) {
#if STXXL_PARALLEL
#pragma omp atomic
#endif
m_heaps_size += m_proc[p]->heap_add_size;
m_proc[p]->heap_add_size = 0;
flush_insertion_heap(p);
}
assert(insheap.size() < insheap.capacity());
// put item onto heap and siftUp
insheap.push_back(element);
std::push_heap(insheap.begin(), insheap.end(), m_compare);
}
else if (!m_is_very_large_bulk && 1)
{
// if small bulk: if heap is full -> sort locally and put into
// internal array list. insert items but DO NOT keep heap
// invariant (it is reestablished in bulk_push_end()).
if (UNLIKELY(insheap.size() >= m_insertion_heap_capacity)) {
#if STXXL_PARALLEL
#pragma omp atomic
#endif
m_heaps_size += m_proc[p]->heap_add_size;
m_proc[p]->heap_add_size = 0;
flush_insertion_heap(p);
}
assert(insheap.size() < insheap.capacity());
// put item onto heap and DO NOT siftUp
insheap.push_back(element);
}
else // m_is_very_large_bulk
{
// aggregate in much larger chunks before flushing
if (UNLIKELY(insheap.size() >= 2 * 1024 * 1024)) {
#if STXXL_PARALLEL
#pragma omp atomic
#endif
m_heaps_size += m_proc[p]->heap_add_size;
m_proc[p]->heap_add_size = 0;
flush_insertion_heap(p);
}
assert(insheap.size() < insheap.capacity());
// put onto insertion heap but do not keep heap property
insheap.push_back(element);
}
m_proc[p]->heap_add_size++;
}
/*!
 * Push an element inside a bulk sequence of pushes. Run bulk_push_begin()
 * before using this method. This function uses the insertion heap id =
 * omp_get_thread_num() (or a random heap when built without OpenMP).
 *
 * \param element The element to push.
 */
void bulk_push(const value_type& element)
{
#if STXXL_PARALLEL
return bulk_push(element, (unsigned_type)omp_get_thread_num());
#else
// no threads available: pick a heap at random to spread the load
unsigned_type id = m_rng() % m_num_insertion_heaps;
return bulk_push(element, id);
#endif
}
/*!
 * Ends a sequence of push operations. Run bulk_push_begin() and some
 * bulk_push() before this. Reestablishes the heap invariants that
 * bulk_push() skipped, folds the per-heap add counters into
 * m_heaps_size, and runs the delayed re-hinting for external arrays
 * created during the bulk session.
 */
void bulk_push_end()
{
assert(m_in_bulk_push);
m_in_bulk_push = false;
// NOTE(review): "&& 0" / "&& 1" pick the active variant, mirroring
// the corresponding branches in bulk_push().
if (!m_is_very_large_bulk && 0)
{
for (int_type p = 0; p < m_num_insertion_heaps; ++p)
{
m_heaps_size += m_proc[p]->heap_add_size;
if (!m_proc[p]->insertion_heap.empty())
m_minima.update_heap(p);
}
}
else if (!m_is_very_large_bulk && 1)
{
#if STXXL_PARALLEL
#pragma omp parallel for
#endif
for (int_type p = 0; p < m_num_insertion_heaps; ++p)
{
// reestablish heap property: siftUp only those items pushed
for (unsigned_type index = m_proc[p]->heap_add_size; index != 0; ) {
std::push_heap(m_proc[p]->insertion_heap.begin(),
m_proc[p]->insertion_heap.end() - (--index),
m_compare);
}
#if STXXL_PARALLEL
#pragma omp atomic
#endif
m_heaps_size += m_proc[p]->heap_add_size;
}
// minima updates run sequentially after the parallel loop
for (int_type p = 0; p < m_num_insertion_heaps; ++p)
{
if (!m_proc[p]->insertion_heap.empty())
m_minima.update_heap(p);
}
}
else // m_is_very_large_bulk
{
#if STXXL_PARALLEL
#pragma omp parallel for
#endif
for (int_type p = 0; p < m_num_insertion_heaps; ++p)
{
if (m_proc[p]->insertion_heap.size() >= m_insertion_heap_capacity) {
// flush out overfull insertion heap arrays
#if STXXL_PARALLEL
#pragma omp atomic
#endif
m_heaps_size += m_proc[p]->heap_add_size;
m_proc[p]->heap_add_size = 0;
flush_insertion_heap(p);
}
else {
// reestablish heap property: siftUp only those items pushed
for (unsigned_type index = m_proc[p]->heap_add_size; index != 0; ) {
std::push_heap(m_proc[p]->insertion_heap.begin(),
m_proc[p]->insertion_heap.end() - (--index),
m_compare);
}
#if STXXL_PARALLEL
#pragma omp atomic
#endif
m_heaps_size += m_proc[p]->heap_add_size;
m_proc[p]->heap_add_size = 0;
}
}
for (int_type p = 0; p < m_num_insertion_heaps; ++p)
{
if (!m_proc[p]->insertion_heap.empty())
m_minima.update_heap(p);
}
}
// EAs created during the bulk session had their hints delayed: do
// the re-hinting now (see bulk_push_begin()).
if (m_bulk_first_delayed_external_array != m_external_arrays.size()) {
STXXL_DEBUG("bulk_push_end: run delayed re-hinting of EAs");
rebuild_hint_tree();
}
check_invariants();
}
//! Extract up to max_size values at once.
//! The extracted items are moved into \c out (its previous contents are
//! discarded); the extract buffer is left empty afterwards.
void bulk_pop(std::vector<value_type>& out, size_t max_size)
{
STXXL_DEBUG("bulk_pop_size with max_size=" << max_size);
const size_t n_elements = std::min<size_t>(max_size, size());
// the requested batch must fit into the extract buffer limit
assert(n_elements < m_extract_buffer_limit);
if (m_heaps_size > 0)
flush_insertion_heaps();
convert_eb_into_ia();
refill_extract_buffer(n_elements, n_elements);
out.resize(0);
// steal the refilled extract buffer instead of copying it
using std::swap;
swap(m_extract_buffer, out);
m_extract_buffer_index = 0;
m_extract_buffer_size = 0;
m_minima.deactivate_extract_buffer();
check_invariants();
}
//! Extracts all elements which are greater or equal to a given limit.
//! \param out result vector
//! \param limit limit value
//! \param max_size maximum number of items to extract
//! \return true if the buffer contains all items < limit, false it was too
//! small.
bool bulk_pop_limit(std::vector<value_type>& out, const value_type& limit,
size_t max_size = std::numeric_limits<size_t>::max())
{
STXXL_DEBUG("bulk_pop_limit with limit=" << limit);
convert_eb_into_ia();
if (m_heaps_size > 0) {
// NOTE(review): the "if (0)" branch is a disabled alternative --
// only the limited flush variant is active.
if (0)
flush_insertion_heaps();
else if (1)
flush_insertion_heaps_with_limit(limit);
}
size_type ias = m_internal_arrays.size();
size_type eas = m_external_arrays.size();
std::vector<size_type> sizes(eas + ias);
std::vector<iterator_pair_type> sequences(eas + ias);
size_type output_size = 0;
int limiting_ea_index = m_external_min_tree.top();
// pop limit may have to change due to memory limit
value_type this_limit = limit;
bool has_full_range = true;
// get all relevant blocks: fetch EA data until every EA's accessible
// range covers the limit, or no more read blocks are available
while (limiting_ea_index > -1)
{
const value_type& ea_limit =
m_external_arrays[limiting_ea_index].get_next_block_min();
if (m_compare(ea_limit, this_limit)) {
// No more EM data smaller or equal to limit
break;
}
if (m_external_arrays[limiting_ea_index].num_hinted_blocks() == 0) {
// No more read/prefetch blocks available for EA: shrink the
// limit to what is accessible and report a partial range
this_limit = ea_limit;
has_full_range = false;
break;
}
wait_next_ea_blocks(limiting_ea_index);
// consider next limiting EA
limiting_ea_index = m_external_min_tree.top();
STXXL_ASSERT(limiting_ea_index < (int)eas);
}
// build sequences: EAs first, then IAs; each sequence is clipped at
// the first element beyond this_limit
for (size_type i = 0; i < eas + ias; ++i) {
iterator begin, end;
if (i < eas) {
assert(!m_external_arrays[i].empty());
assert(m_external_arrays[i].valid());
begin = m_external_arrays[i].begin();
end = m_external_arrays[i].end();
}
else {
size_type j = i - eas;
assert(!(m_internal_arrays[j].empty()));
begin = m_internal_arrays[j].begin();
end = m_internal_arrays[j].end();
}
end = std::lower_bound(begin, end, this_limit, m_inv_compare);
sizes[i] = std::distance(begin, end);
sequences[i] = std::make_pair(begin, end);
}
output_size = std::accumulate(sizes.begin(), sizes.end(), 0);
if (output_size > max_size) {
output_size = max_size;
has_full_range = false;
}
out.resize(output_size);
STXXL_DEBUG("bulk_pop_limit with" <<
" sequences=" << sequences.size() <<
" output_size=" << output_size <<
" has_full_range=" << has_full_range);
potentially_parallel::multiway_merge(
sequences.begin(), sequences.end(),
out.begin(), output_size, m_inv_compare);
advance_arrays(sequences, sizes, eas, ias);
check_invariants();
return has_full_range;
}
#if TODO_MAYBE_FIXUP_LATER
    /*!
     * Insert a vector of elements at one time.
     * \param elements Vector containing the elements to push.
     * Attention: elements vector may be owned by the PQ afterwards.
     */
    void bulk_push_vector(std::vector<value_type>& elements)
    {
        size_type heap_capacity = m_num_insertion_heaps * m_insertion_heap_capacity;
        // very large inputs bypass the insertion heaps entirely
        if (elements.size() > heap_capacity / 2) {
            flush_array(elements);
            return;
        }
        bulk_push_begin(elements.size());
#if STXXL_PARALLEL
#pragma omp parallel
        {
            const unsigned thread_num = omp_get_thread_num();
            // Work-share the loop over the enclosing parallel region.
            // BUGFIX: this was "#pragma omp parallel for", which opens a
            // nested parallel region per thread (nesting is enabled by the
            // constructor), so every outer thread's team would execute ALL
            // iterations and push each element multiple times.
#pragma omp for
            for (size_type i = 0; i < elements.size(); ++i) {
                bulk_push(elements[i], thread_num);
            }
        }
#else
        const unsigned thread_num = m_rng() % m_num_insertion_heaps;
        for (size_type i = 0; i < elements.size(); ++i) {
            bulk_push(elements[i], thread_num);
        }
#endif
        bulk_push_end();
    }
#endif
//! \}
//! \name Aggregation Operations
//! \{
/*!
* Aggregate pushes. Use flush_aggregated_pushes() to finally push
 * them. extract_min is allowed in between the aggregation of
* pushes if you can assure, that the extracted value is smaller than all
* of the aggregated values.
* \param element The element to push.
*/
void aggregate_push(const value_type& element)
{
    // defer the element; it only becomes visible to the queue once
    // flush_aggregated_pushes() is called
    m_aggregated_pushes.insert(m_aggregated_pushes.end(), element);
}
#if TODO_MAYBE_FIXUP_LATER
/*!
 * Insert the aggregated values into the queue using push(), bulk insert,
 * or sorting, depending on the number of aggregated values.
 * The strategy is chosen by the amount of RAM the batch would need and
 * by the batch size relative to the heap capacity.
 */
void flush_aggregated_pushes()
{
size_type size = m_aggregated_pushes.size();
size_type ram_internal = 2 * size * sizeof(value_type); // ram for the sorted array + part of the ram for the merge buffer
size_type heap_capacity = m_num_insertion_heaps * m_insertion_heap_capacity;
if (ram_internal > m_mem_for_heaps / 2) {
// too large for the heaps: sort directly into an array
flush_array(m_aggregated_pushes);
}
else if ((m_aggregated_pushes.size() > c_single_insert_limit) && (m_aggregated_pushes.size() < heap_capacity)) {
// medium batch: use the bulk push machinery
bulk_push_vector(m_aggregated_pushes);
}
else {
// small batch: plain single pushes
for (value_iterator i = m_aggregated_pushes.begin(); i != m_aggregated_pushes.end(); ++i) {
push(*i);
}
}
m_aggregated_pushes.clear();
}
#endif
//! \}
//! \name std::priority_queue compliant operations
//! \{
/*!
 * Insert new element
 * \param element the element to insert.
 * \param p number of insertion heap to insert item into
 */
void push(const value_type& element, unsigned_type p = 0)
{
assert(!m_in_bulk_push && !m_limit_extract);
heap_type& insheap = m_proc[p]->insertion_heap;
if (insheap.size() >= m_insertion_heap_capacity) {
flush_insertion_heap(p);
}
// push item to end of heap and siftUp; the class's own push_heap
// variant returns the final index of the inserted element
insheap.push_back(element);
unsigned_type index = push_heap(insheap.begin(), insheap.end(),
m_compare);
++m_heaps_size;
// the heap minimum only changed if the item ended up at the root or
// the heap was previously empty
if (insheap.size() == 1 || index == 0)
m_minima.update_heap(p);
}
//! Access the minimum element.
//! May first refill the extract buffer from the internal/external
//! arrays if it has run empty. The minima winner tree decides which
//! source (heap, internal array, or extract buffer) holds the minimum.
const value_type & top()
{
assert(!m_in_bulk_push && !m_limit_extract);
assert(!empty());
if (extract_buffer_empty()) {
refill_extract_buffer(std::min(m_extract_buffer_limit,
m_internal_size + m_external_size));
}
// local flag, presumably consumed by the STXXL_DEBUG macros below;
// set to true to enable per-call debug output (verify macro usage)
static const bool debug = false;
std::pair<unsigned, unsigned> type_and_index = m_minima.top();
const unsigned& type = type_and_index.first;
const unsigned& index = type_and_index.second;
assert(type < 4);
switch (type) {
case minima_type::HEAP:
STXXL_DEBUG("heap " << index <<
": " << m_proc[index]->insertion_heap[0]);
return m_proc[index]->insertion_heap[0];
case minima_type::IA:
STXXL_DEBUG("ia " << index <<
": " << m_internal_arrays[index].get_min());
return m_internal_arrays[index].get_min();
case minima_type::EB:
STXXL_DEBUG("eb " << m_extract_buffer_index <<
": " << m_extract_buffer[m_extract_buffer_index]);
return m_extract_buffer[m_extract_buffer_index];
default:
STXXL_ERRMSG("Unknown extract type: " << type);
abort();
}
}
//! Remove the minimum element.
//! Like top(), may first refill the extract buffer; then removes the
//! minimum from whichever source the minima tree reports and updates
//! or deactivates that source's player.
void pop()
{
assert(!m_in_bulk_push && !m_limit_extract);
m_stats.num_extracts++;
if (extract_buffer_empty()) {
refill_extract_buffer(std::min(m_extract_buffer_limit,
m_internal_size + m_external_size));
}
m_stats.extract_min_time.start();
std::pair<unsigned, unsigned> type_and_index = m_minima.top();
unsigned type = type_and_index.first;
unsigned index = type_and_index.second;
assert(type < 4);
switch (type) {
case minima_type::HEAP:
{
// minimum sits in insertion heap 'index': classic pop_heap
heap_type& insheap = m_proc[index]->insertion_heap;
m_stats.pop_heap_time.start();
std::pop_heap(insheap.begin(), insheap.end(), m_compare);
insheap.pop_back();
m_stats.pop_heap_time.stop();
m_heaps_size--;
if (!insheap.empty())
m_minima.update_heap(index);
else
m_minima.deactivate_heap(index);
break;
}
case minima_type::IA:
{
// minimum is the head of internal array 'index'
m_internal_arrays[index].inc_min();
m_internal_size--;
if (!(m_internal_arrays[index].empty()))
m_minima.update_internal_array(index);
else
// internal array has run empty
m_minima.deactivate_internal_array(index);
break;
}
case minima_type::EB:
{
// minimum is the current front of the extract buffer
++m_extract_buffer_index;
assert(m_extract_buffer_size > 0);
--m_extract_buffer_size;
if (!extract_buffer_empty())
m_minima.update_extract_buffer();
else
m_minima.deactivate_extract_buffer();
break;
}
default:
STXXL_ERRMSG("Unknown extract type: " << type);
abort();
}
m_stats.extract_min_time.stop();
check_invariants();
}
//! \}
//! \name Bulk-Limit Operations
//! \{
protected:
//! current limit element of the bulk-limit extraction session
value_type m_limit_element;
//! flag if inside a bulk limit extract session
bool m_limit_extract;
//! flag if the extract buffer contains the full limit range
//! (i.e. no more items < limit remain outside the buffer)
bool m_limit_has_full_range;
public:
//! Begin bulk-limit extraction session with limit element.
//! Fills the extract buffer via bulk_pop_limit() and opens a bulk-push
//! session of the given size (closed again by limit_end()).
void limit_begin(const value_type& limit, size_type bulk_size)
{
m_limit_extract = true;
m_limit_element = limit;
std::vector<value_type> new_extract_buffer;
m_limit_has_full_range =
bulk_pop_limit(new_extract_buffer, limit, m_extract_buffer_limit);
// take over the popped items as the new extract buffer (swap, no copy)
std::swap(new_extract_buffer, m_extract_buffer);
m_extract_buffer_index = 0;
m_extract_buffer_size = m_extract_buffer.size();
if (m_extract_buffer_size)
m_minima.update_extract_buffer();
else
m_minima.deactivate_extract_buffer();
bulk_push_begin(bulk_size);
}
//! Push new item >= bulk-limit element into insertion heap p.
void limit_push(const value_type& element, const unsigned_type p = 0)
{
    assert(m_limit_extract);
    // the pushed item must not precede the session's limit element
    assert(!m_compare(m_limit_element, element));
    bulk_push(element, p);
}
//! Access the minimum element, which can only be in the extract buffer.
//! Refills the buffer via bulk_pop_limit() when it has run dry; returns
//! the limit element itself as a sentinel once the full range has been
//! consumed.
const value_type & limit_top()
{
assert(m_limit_extract);
// if buffer is empty and we extracted the full range last time, return
// limit items as sentinel.
if (m_extract_buffer_size == 0 && m_limit_has_full_range)
return m_limit_element;
if (extract_buffer_empty())
{
// extract more items
std::vector<value_type> new_extract_buffer;
m_limit_has_full_range =
bulk_pop_limit(new_extract_buffer, m_limit_element,
m_extract_buffer_limit);
// take over the popped items as the new extract buffer
std::swap(new_extract_buffer, m_extract_buffer);
m_extract_buffer_index = 0;
m_extract_buffer_size = m_extract_buffer.size();
if (m_extract_buffer_size)
m_minima.update_extract_buffer();
else
m_minima.deactivate_extract_buffer();
}
return m_extract_buffer[m_extract_buffer_index];
}
//! Remove the minimum element, only works correctly while elements < L.
//! Advances the extract buffer and refills it via bulk_pop_limit() when
//! it runs dry and more items below the limit remain.
void limit_pop()
{
assert(m_limit_extract);
++m_extract_buffer_index;
assert(m_extract_buffer_size > 0);
--m_extract_buffer_size;
if (extract_buffer_empty() && !m_limit_has_full_range)
{
// extract more items
std::vector<value_type> new_extract_buffer;
m_limit_has_full_range =
bulk_pop_limit(new_extract_buffer, m_limit_element,
m_extract_buffer_limit);
// take over the popped items as the new extract buffer
std::swap(new_extract_buffer, m_extract_buffer);
m_extract_buffer_index = 0;
m_extract_buffer_size = m_extract_buffer.size();
if (m_extract_buffer_size)
m_minima.update_extract_buffer();
else
m_minima.deactivate_extract_buffer();
}
}
//! Finish bulk-limit extraction session: close the enclosing bulk-push
//! session, then leave limit mode.
void limit_end()
{
    assert(m_limit_extract);
    bulk_push_end();
    m_limit_extract = false;
}
//! \}
protected:
//! Flushes all elements of the insertion heaps which are greater
//! or equal to a given limit.
//! The flushed items are sorted and added as one internal array.
//! \param limit limit value
void flush_insertion_heaps_with_limit(const value_type& limit)
{
// perform extract for all items < L into back of insertion_heap
std::vector<unsigned_type> back_size(m_num_insertion_heaps);
//#if STXXL_PARALLEL
//#pragma omp parallel for
//#endif
for (size_t p = 0; p < m_num_insertion_heaps; ++p)
{
heap_type& insheap = m_proc[p]->insertion_heap;
typename heap_type::iterator back = insheap.end();
// repeatedly pop the heap top into the shrinking back range;
// NOTE(review): the loop tests insheap[0], the top of the
// remaining heap [begin, back) -- verify comparison direction
// against m_compare's max-PQ semantics
while (back != insheap.begin() &&
m_compare(limit, insheap[0]))
{
// while top < L, perform pop_heap: put top to back and
// siftDown new items (shortens heap by one)
std::pop_heap(insheap.begin(), back, m_compare);
--back;
}
// range insheap.begin() + back to insheap.end() is < L, rest >= L.
// debug-only verification of the partition
for (typename heap_type::const_iterator it = insheap.begin();
it != insheap.end(); ++it)
{
if (it < back)
assert(!m_compare(limit, *it));
else
assert(m_compare(limit, *it));
}
back_size[p] = insheap.end() - back;
}
// put items from insertion heaps into an internal array
unsigned_type back_sum = std::accumulate(
back_size.begin(), back_size.end(), unsigned_type(0));
STXXL_DEBUG("flush_insertion_heaps_with_limit(): back_sum = " << back_sum);
if (back_sum)
{
// test that enough RAM is available for remaining items
flush_ia_ea_until_memory_free(back_sum * sizeof(value_type));
std::vector<value_type> values(back_sum);
// copy items into values vector
typename std::vector<value_type>::iterator vi = values.begin();
for (size_t p = 0; p < m_num_insertion_heaps; ++p)
{
heap_type& insheap = m_proc[p]->insertion_heap;
std::copy(insheap.end() - back_size[p], insheap.end(), vi);
vi += back_size[p];
// shrink the heap by the items just copied out
insheap.resize(insheap.size() - back_size[p]);
if (insheap.empty())
m_minima.deactivate_heap(p);
else
m_minima.update_heap(p);
}
potentially_parallel::sort(values.begin(), values.end(), m_inv_compare);
add_as_internal_array(values);
m_heaps_size -= back_sum;
}
}
public:
/*!
 * Merges all external arrays and all internal arrays into one external array.
 * Public for benchmark purposes.
 * This is a last-resort operation; it logs an error because under a
 * well-tuned configuration it should never be needed.
 */
void merge_external_arrays()
{
STXXL_ERRMSG("Merging external arrays. This should not happen."
<< " You should adjust memory assignment and/or external array level size.");
// level 0 with 'true' forces merging everything (see check_external_level)
check_external_level(0, true);
STXXL_DEBUG("Merging all external arrays done.");
resize_read_pool();
// Rebuild hint tree completely as the hint sequence may have changed.
// Inside a bulk push the re-hinting is delayed to bulk_push_end().
if (!m_in_bulk_push)
rebuild_hint_tree();
else
assert(m_external_arrays.size() - 1 >= m_bulk_first_delayed_external_array);
check_invariants();
}
//! Free up memory by flushing internal arrays and combining external
//! arrays until enough bytes are free.
void flush_ia_ea_until_memory_free(internal_size_type mem_free)
{
    // nothing to do if the request is already satisfied
    if (m_mem_left >= mem_free)
        return;

    // prefer writing out internal arrays; only fall back to the
    // expensive external-array merge when no internal data is left
    if (m_internal_size > 0)
        flush_internal_arrays();
    else
        merge_external_arrays();

    assert(m_mem_left >= mem_free);
}
//! Automatically resize the read/prefetch buffer pool depending on number
//! of external arrays.
void resize_read_pool()
{
unsigned_type new_num_read_blocks =
m_num_read_blocks_per_ea * m_external_arrays.size();
STXXL_DEBUG("resize_read_pool:" <<
" m_num_read_blocks=" << m_num_read_blocks <<
" ea_size=" << m_external_arrays.size() <<
" m_num_read_blocks_per_ea=" << m_num_read_blocks_per_ea <<
" new_num_read_blocks=" << new_num_read_blocks <<
" free_size_prefetch=" << m_pool.free_size_prefetch() <<
" m_num_hinted_blocks=" << m_num_hinted_blocks <<
" m_num_used_read_blocks=" << m_num_used_read_blocks);
// add new blocks
if (new_num_read_blocks > m_num_read_blocks)
{
unsigned_type mem_needed =
(new_num_read_blocks - m_num_read_blocks) * block_size;
// -tb: this may recursively call this function!
//flush_ia_ea_until_memory_free(mem_needed);
STXXL_ASSERT(m_mem_left >= mem_needed);
while (new_num_read_blocks > m_num_read_blocks) {
block_type* new_block = new block_type();
m_pool.add_prefetch(new_block);
++m_num_read_blocks;
}
m_mem_left -= mem_needed;
}
// steal extra blocks (as many as possible)
if (new_num_read_blocks < m_num_read_blocks)
{
while (new_num_read_blocks < m_num_read_blocks &&
m_pool.free_size_prefetch() > 0)
{
block_type* del_block = m_pool.steal_prefetch();
delete del_block;
--m_num_read_blocks;
m_mem_left += block_size;
}
if (new_num_read_blocks < m_num_read_blocks)
STXXL_ERRMSG("WARNING: could not immediately reduce read/prefetch pool!");
}
}
//! Rebuild hint tree completely as the hint sequence may have changed, and
//! re-hint the correct block sequence.
//!
//! Procedure: (1) reset each EA's hint pointer, (2) rebuild the winner
//! tree from the next hintable block minima, (3) virtually release all
//! current hints and greedily pre-hint the globally smallest next blocks,
//! (4) cancel hints that are no longer covered and issue the real hints.
void rebuild_hint_tree()
{
    m_stats.hint_time.start();
    // prepare rehinting sequence: reset hint begin pointer
    for (size_t i = 0; i < m_external_arrays.size(); ++i)
        m_external_arrays[i].rebuild_hints_prepare();
    // rebuild hint tree with first elements
    for (size_t i = 0; i < m_external_arrays.size(); ++i)
    {
        if (m_external_arrays[i].has_unhinted_em_data()) {
            m_hint_tree.activate_without_replay(i);
        }
        else {
            m_hint_tree.deactivate_without_replay(i);
        }
    }
    m_hint_tree.rebuild();
    // virtually release all hints: blocks hinted so far count as free
    // again for the redistribution below
    unsigned_type free_prefetch_blocks =
        m_pool.free_size_prefetch() + m_num_hinted_blocks;
    m_num_hinted_blocks = 0;
    int gmin_index;
    // greedily pre-hint the globally smallest next blocks while (virtual)
    // free prefetch buffers remain; top() < 0 means no candidates left
    while (free_prefetch_blocks > 0 &&
           (gmin_index = m_hint_tree.top()) >= 0)
    {
        assert((size_t)gmin_index < m_external_arrays.size());
        STXXL_DEBUG("Give pre-hint in EA[" << gmin_index << "] min " <<
                    m_external_arrays[gmin_index].get_next_hintable_min());
        m_external_arrays[gmin_index].rebuild_hints_prehint_next_block();
        --free_prefetch_blocks;
        ++m_num_hinted_blocks;
        if (m_external_arrays[gmin_index].has_unhinted_em_data()) {
            m_hint_tree.replay_on_change(gmin_index);
        }
        else {
            m_hint_tree.deactivate_player(gmin_index);
        }
    }
    // invalidate all hinted blocks no longer needed
    for (size_t i = 0; i < m_external_arrays.size(); ++i)
        m_external_arrays[i].rebuild_hints_cancel();
    // perform real hinting on pre-hinted blocks
    for (size_t i = 0; i < m_external_arrays.size(); ++i)
        m_external_arrays[i].rebuild_hints_finish();
    assert(free_prefetch_blocks == m_pool.free_size_prefetch());
    m_stats.hint_time.stop();
}
//! Updates the prefetch prediction tree after a remove_items(), which
//! frees up blocks.
//! \param ea_index index of the external array in question
inline void update_hint_tree(size_t ea_index)
{
    m_stats.hint_time.start();
    // an array with no unhinted EM data left leaves the tournament
    if (!m_external_arrays[ea_index].has_unhinted_em_data())
        m_hint_tree.deactivate_player(ea_index);
    else
        m_hint_tree.replay_on_change(ea_index);
    m_stats.hint_time.stop();
}
//! Updates the external min tree after a remove() or a
//! wait_next_blocks() call.
//! \param ea_index index of the external array in question
inline void update_external_min_tree(size_t ea_index)
{
    // an array with no further EM data leaves the tournament
    if (!m_external_arrays[ea_index].has_em_data())
        m_external_min_tree.deactivate_player(ea_index);
    else
        m_external_min_tree.replay_on_change(ea_index);
}
//! Hints EA blocks which will be needed soon. Hints at most
//! m_num_prefetchers blocks globally.
inline void hint_external_arrays()
{
m_stats.hint_time.start();
STXXL_DEBUG("hint_external_arrays()"
" for free_size_prefetch=" << m_pool.free_size_prefetch());
int gmin_index;
while (m_pool.free_size_prefetch() > 0 &&
(gmin_index = m_hint_tree.top()) >= 0)
{
assert((size_t)gmin_index < m_external_arrays.size());
STXXL_DEBUG("Give hint in EA[" << gmin_index << "]");
m_external_arrays[gmin_index].hint_next_block();
++m_num_hinted_blocks;
if (m_external_arrays[gmin_index].has_unhinted_em_data()) {
m_hint_tree.replay_on_change(gmin_index);
}
else {
m_hint_tree.deactivate_player(gmin_index);
}
}
m_stats.hint_time.stop();
}
//! Print statistics: compile-time configuration, memory assignment, and
//! the accumulated counters/timers of this queue and its minima tree.
void print_stats() const
{
    // compile-time configuration flags
    STXXL_VARDUMP(c_merge_sorted_heaps);
    STXXL_VARDUMP(c_limit_extract_buffer);
    STXXL_VARDUMP(c_single_insert_limit);
    if (c_limit_extract_buffer) {
        STXXL_VARDUMP(m_extract_buffer_limit);
        STXXL_MEMDUMP(m_extract_buffer_limit * sizeof(value_type));
    }
#if STXXL_PARALLEL
    STXXL_VARDUMP(omp_get_max_threads());
#endif
    // memory assignment snapshot
    STXXL_MEMDUMP(m_mem_for_heaps);
    STXXL_MEMDUMP(m_mem_left);
    //if (num_extract_buffer_refills > 0) {
    //    STXXL_VARDUMP(total_extract_buffer_size / num_extract_buffer_refills);
    //    STXXL_MEMDUMP(total_extract_buffer_size / num_extract_buffer_refills * sizeof(value_type));
    //}
    STXXL_MSG(m_stats);
    m_minima.print_stats();
}
protected:
//! Calculates the sequences vector needed by the multiway merger,
//! considering inaccessible data from external arrays.
//! The sizes vector stores the size of each sequence.
//! \param sizes [out] number of mergeable elements of each sequence
//! \param sequences [out] begin/end iterator pair of each sequence
//! \param reuse_previous_lower_bounds Reuse upper bounds from previous runs.
//! sequences[i].second must be valid upper bound iterator from a previous run!
//! \returns the index of the external array which is limiting factor
//! or m_external_arrays.size() if not limited.
size_t calculate_merge_sequences(std::vector<size_type>& sizes,
                                 std::vector<iterator_pair_type>& sequences,
                                 bool reuse_previous_lower_bounds = false)
{
    STXXL_DEBUG("calculate merge sequences");
    static const bool debug = false;
    const size_type eas = m_external_arrays.size();
    const size_type ias = m_internal_arrays.size();
    assert(sizes.size() == eas + ias);
    assert(sequences.size() == eas + ias);
    /*
     * determine minimum of each first block
     */
    int gmin_index = m_external_min_tree.top();
    // a negative top() means no EA has inaccessible EM data left, so the
    // merge is not limited by any next-block minimum
    const bool needs_limit = (gmin_index >= 0);
    // test correctness of external block min tree
#ifdef STXXL_DEBUG_ASSERTIONS
    bool test_needs_limit = false;
    int test_gmin_index = 0;
    value_type test_gmin_value;
    m_stats.refill_minmax_time.start();
    // recompute the global next-block minimum by linear scan and compare
    // against the winner tree's answer
    for (size_type i = 0; i < eas; ++i) {
        if (m_external_arrays[i].has_em_data()) {
            const value_type& min_value =
                m_external_arrays[i].get_next_block_min();
            if (!test_needs_limit) {
                test_needs_limit = true;
                test_gmin_value = min_value;
                test_gmin_index = i;
            }
            else {
                STXXL_DEBUG("min[" << i << "]: " << min_value <<
                            " test: " << test_gmin_value <<
                            ": " << m_inv_compare(min_value, test_gmin_value));
                if (m_inv_compare(min_value, test_gmin_value)) {
                    test_gmin_value = min_value;
                    test_gmin_index = i;
                }
            }
        }
    }
    m_stats.refill_minmax_time.stop();
    STXXL_ASSERT(needs_limit == test_needs_limit);
    STXXL_ASSERT(!needs_limit || gmin_index == test_gmin_index);
#endif
    /*
     * calculate size and create sequences to merge
     */
#if STXXL_PARALLEL
// #pragma omp parallel for if(eas + ias > m_num_insertion_heaps)
#endif
    for (size_type i = 0; i < eas + ias; ++i) {
        iterator begin, end;
        if (i < eas) {
            begin = m_external_arrays[i].begin();
            end = m_external_arrays[i].end();
        }
        else {
            size_type j = i - eas;
            begin = m_internal_arrays[j].begin();
            end = m_internal_arrays[j].end();
        }
        if (needs_limit) {
            // limit each sequence to elements smaller than the smallest
            // minimum of the not-yet-accessible blocks
            const value_type& gmin_value =
                m_external_arrays[gmin_index].get_next_block_min();
            // remove timer if parallel
            //stats.refill_lower_bound_time.start();
            if (reuse_previous_lower_bounds) {
                // Be careful that sequences[i].second is really valid and
                // set by a previous calculate_merge_sequences() run!
                end = std::lower_bound(sequences[i].second, end,
                                       gmin_value, m_inv_compare);
            }
            else
            {
                end = std::lower_bound(begin, end,
                                       gmin_value, m_inv_compare);
            }
            //stats.refill_lower_bound_time.stop();
        }
        sizes[i] = std::distance(begin, end);
        sequences[i] = std::make_pair(begin, end);
        STXXL_DEBUG("sequence[" << i << "] " << (i < eas ? "ea " : "ia ") <<
                    begin << " - " << end <<
                    " size " << sizes[i] <<
                    (needs_limit ? " with ub limit" : ""));
    }
    if (needs_limit) {
        STXXL_DEBUG("return with needs_limit: gmin_index=" << gmin_index);
        return gmin_index;
    }
    else {
        // fixed: this branch previously logged "with needs_limit"
        STXXL_DEBUG("return without needs_limit: eas=" << eas);
        return eas;
    }
}
protected:
//! Convert extract buffer into a new internal array.
//! \param do_not_flush if true, skip freeing memory via
//! flush_ia_ea_until_memory_free() first (used by flush_internal_arrays()
//! to avoid recursing back into itself).
void convert_eb_into_ia(bool do_not_flush = false)
{
    if (m_extract_buffer_size == 0) return;
    STXXL_DEBUG("convert_eb_into_ia");
    // tb: if in limit sequence and the EB gets flushed out to EM, then we
    // have to re-merge items into the EB instead of returning the
    // sentinel.
    m_limit_has_full_range = false;
    // TODO: memory is NOT allocated, but extract buffer is currently not
    // counted
    if (!do_not_flush)
        flush_ia_ea_until_memory_free(
            internal_array_type::int_memory(m_extract_buffer.size())
            );
    // the flush above may itself have consumed the extract buffer
    if (m_extract_buffer_size == 0) return;
    // first deactivate extract buffer to replay tree for new IA.
    m_minima.deactivate_extract_buffer();
    // add eb as internal array with current index
    add_as_internal_array(m_extract_buffer, m_extract_buffer_index);
    m_extract_buffer_index = 0;
    m_extract_buffer_size = 0;
}
//! Refills the extract buffer from the external arrays.
//! \param minimum_size requested minimum size of the resulting extract buffer.
//! Prints a warning if there is not enough data to reach this size.
//! \param maximum_size maximum size of the extract buffer. Using
//! m_extract_buffer_limit if set to 0.
inline void refill_extract_buffer(size_t minimum_size = 0,
                                  size_t maximum_size = 0)
{
    STXXL_DEBUG("refilling extract buffer" <<
                " ia_size=" << m_internal_arrays.size() <<
                " ea_size=" << m_external_arrays.size());
    if (maximum_size == 0)
        maximum_size = m_extract_buffer_limit;
    check_invariants();
    assert(extract_buffer_empty());
    m_extract_buffer_index = 0;
    cleanup_external_arrays();
    size_type ias, eas = m_external_arrays.size();
    m_minima.clear_internal_arrays();
    cleanup_internal_arrays();
    ias = m_internal_arrays.size();
    // nothing to merge: leave the extract buffer empty
    if (eas == 0 && ias == 0) {
        m_extract_buffer.resize(0);
        m_minima.deactivate_extract_buffer();
        return;
    }
    m_stats.num_extract_buffer_refills++;
    m_stats.refill_extract_buffer_time.start();
    m_stats.refill_time_before_merge.start();
    std::vector<size_type> sizes(eas + ias);
    std::vector<iterator_pair_type> sequences(eas + ias);
    size_type output_size = 0;
    if (minimum_size > 0) {
        // eas + 1 is a sentinel: "no merge sequence calculated yet"
        size_t limiting_ea_index = eas + 1;
        bool reuse_lower_bounds = false;
        while (output_size < minimum_size)
        {
            STXXL_DEBUG("refill: request more data," <<
                        " output_size=" << output_size <<
                        " minimum_size=" << minimum_size <<
                        " limiting_ea_index=" << limiting_ea_index);
            if (limiting_ea_index < eas) {
                // fetch more blocks from the limiting EA if any are hinted
                if (m_external_arrays[limiting_ea_index].num_hinted_blocks() == 0)
                    break;
                wait_next_ea_blocks(limiting_ea_index);
                reuse_lower_bounds = true;
            }
            else if (limiting_ea_index == eas) {
                // no more unaccessible EM data
                STXXL_MSG("Warning: refill_extract_buffer(n): "
                          "minimum_size > # mergeable elements!");
                break;
            }
            limiting_ea_index = calculate_merge_sequences(
                sizes, sequences, reuse_lower_bounds);
            // fixed: sum in size_type; an int initializer would truncate
            // totals beyond 2^31 elements
            output_size = std::accumulate(sizes.begin(), sizes.end(),
                                          size_type(0));
        }
    }
    else {
        calculate_merge_sequences(sizes, sequences);
        // fixed: sum in size_type; an int initializer would truncate
        // totals beyond 2^31 elements
        output_size = std::accumulate(sizes.begin(), sizes.end(),
                                      size_type(0));
    }
    if (c_limit_extract_buffer) {
        output_size = std::min<size_t>(output_size, maximum_size);
    }
    m_stats.max_extract_buffer_size.set_max(output_size);
    m_stats.total_extract_buffer_size += output_size;
    assert(output_size > 0);
    m_extract_buffer.resize(output_size);
    m_extract_buffer_size = output_size;
    m_stats.refill_time_before_merge.stop();
    m_stats.refill_merge_time.start();
    potentially_parallel::multiway_merge(
        sequences.begin(), sequences.end(),
        m_extract_buffer.begin(), output_size, m_inv_compare);
    m_stats.refill_merge_time.stop();
    m_stats.refill_time_after_merge.start();
    // consume the merged items from their source arrays and re-hint
    advance_arrays(sequences, sizes, eas, ias);
    m_minima.update_extract_buffer();
    m_stats.refill_time_after_merge.stop();
    m_stats.refill_extract_buffer_time.stop();
    check_invariants();
}
//! Requests more EM data from a given EA and updates
//! the winner trees and hints accordingly.
//! \param ea_index index of the external array to wait on
inline void wait_next_ea_blocks(unsigned_type ea_index)
{
    // wait_next_blocks() reports how many previously hinted blocks became
    // readable; move them from "hinted" to "used" accounting
    const unsigned_type delivered =
        m_external_arrays[ea_index].wait_next_blocks();
    m_num_hinted_blocks -= delivered;
    m_num_used_read_blocks += delivered;
    update_external_min_tree(ea_index);
}
// Removes empty arrays and updates the winner trees accordingly
inline void advance_arrays(std::vector<iterator_pair_type>& sequences,
std::vector<size_type>& sizes,
size_t eas, size_t ias)
{
unsigned_type total_freed_blocks = 0;
for (size_type i = 0; i < eas + ias; ++i) {
// dist represents the number of elements that haven't been merged
size_type dist = std::distance(sequences[i].first,
sequences[i].second);
const size_t diff = sizes[i] - dist;
if (diff == 0) continue;
if (i < eas) {
// remove items and free blocks in RAM.
unsigned_type freed_blocks =
m_external_arrays[i].remove_items(diff);
m_num_used_read_blocks -= freed_blocks;
total_freed_blocks += freed_blocks;
// correct item count.
assert(m_external_size >= diff);
m_external_size -= diff;
}
else {
size_type j = i - eas;
m_internal_arrays[j].inc_min(diff);
assert(m_internal_size >= diff);
m_internal_size -= diff;
}
}
// remove empty arrays - important for the next round (may also reduce
// number of prefetch buffers, so must be before hinting).
cleanup_external_arrays();
// prefetch new blocks from EAs using freed blocks
if (total_freed_blocks)
hint_external_arrays();
m_stats.num_new_external_arrays = 0;
cleanup_internal_arrays();
}
//! Flushes the insertions heap p into an internal array.
//! \param p index of the (non-empty) insertion heap to flush
inline void flush_insertion_heap(unsigned_type p)
{
    assert(m_proc[p]->insertion_heap.size() != 0);
    heap_type& insheap = m_proc[p]->insertion_heap;
    size_t size = insheap.size();
    STXXL_DEBUG0(
        "Flushing insertion heap array p=" << p <<
        " size=" << insheap.size() <<
        " capacity=" << insheap.capacity() <<
        " int_memory=" << internal_array_type::int_memory(insheap.size()) <<
        " mem_left=" << m_mem_left);
    m_stats.num_insertion_heap_flushes++;
    stats_timer flush_time(true); // separate timer due to parallel sorting
    // sort locally, independent of others
    std::sort(insheap.begin(), insheap.end(), m_inv_compare);
    // the shared structures (memory accounting, minima tree, IA list)
    // must be modified by one thread at a time
#if STXXL_PARALLEL
#pragma omp critical (stxxl_flush_insertion_heap)
#endif
    {
        // test that enough RAM is available for merged internal array:
        // otherwise flush the existing internal arrays out to disk.
        flush_ia_ea_until_memory_free(
            internal_array_type::int_memory(insheap.size()));
        // invalidate player in minima tree (before adding the IA to tree)
        m_minima.deactivate_heap(p);
        // insheap is empty afterwards, as vector was swapped into new_array
        add_as_internal_array(insheap);
        // reserve new insertion heap
        insheap.reserve(m_insertion_heap_capacity);
        assert(insheap.capacity() * sizeof(value_type)
               == insertion_heap_int_memory());
        // update item counts
#if STXXL_PARALLEL
#pragma omp atomic
#endif
        m_heaps_size -= size;
    }
    m_stats.insertion_heap_flush_time += flush_time;
}
//! Flushes all insertions heaps into an internal array.
inline void flush_insertion_heaps()
{
size_type max_mem_needed;
if (c_merge_sorted_heaps) {
max_mem_needed = m_mem_for_heaps;
}
else {
max_mem_needed = insertion_heap_int_memory();
}
// test that enough RAM is available for merged internal array:
// otherwise flush the existing internal arrays out to disk.
flush_ia_ea_until_memory_free(max_mem_needed);
m_stats.num_insertion_heap_flushes++;
m_stats.insertion_heap_flush_time.start();
size_type size = m_heaps_size;
size_type int_memory = 0;
assert(size > 0);
std::vector<std::pair<value_iterator, value_iterator> > sequences(m_num_insertion_heaps);
#if STXXL_PARALLEL
#pragma omp parallel for
#endif
for (long i = 0; i < m_num_insertion_heaps; ++i)
{
heap_type& insheap = m_proc[i]->insertion_heap;
std::sort(insheap.begin(), insheap.end(), m_inv_compare);
if (c_merge_sorted_heaps)
sequences[i] = std::make_pair(insheap.begin(), insheap.end());
int_memory += insheap.capacity();
}
if (c_merge_sorted_heaps)
{
m_stats.merge_sorted_heaps_time.start();
std::vector<value_type> merged_array(size);
potentially_parallel::multiway_merge(
sequences.begin(), sequences.end(),
merged_array.begin(), size, m_inv_compare);
m_stats.merge_sorted_heaps_time.stop();
add_as_internal_array(merged_array);
for (int_type i = 0; i < m_num_insertion_heaps; ++i)
{
m_proc[i]->insertion_heap.clear();
m_proc[i]->insertion_heap.reserve(m_insertion_heap_capacity);
}
m_minima.clear_heaps();
}
else
{
for (unsigned i = 0; i < m_num_insertion_heaps; ++i)
{
heap_type& insheap = m_proc[i]->insertion_heap;
if (insheap.size() == 0) continue;
add_as_internal_array(insheap);
// reserve new insertion heap
insheap.reserve(m_insertion_heap_capacity);
}
m_minima.clear_heaps();
}
m_heaps_size = 0;
m_stats.insertion_heap_flush_time.stop();
check_invariants();
}
//! Flushes the internal arrays into an external array.
//! Falls back to merge_external_arrays() if the internal arrays do not
//! free more RAM than the new external array would consume.
void flush_internal_arrays()
{
    STXXL_DEBUG("Flushing internal arrays" <<
                " num_arrays=" << m_internal_arrays.size());
    m_stats.num_internal_array_flushes++;
    m_stats.internal_array_flush_time.start();
    m_minima.clear_internal_arrays();
    // also flush extract buffer items out to disk.
    // (do_not_flush=true avoids recursing back into this function)
    convert_eb_into_ia(true);
    // clean up internal arrays that have been deleted in extract_min!
    cleanup_internal_arrays();
    size_type num_arrays = m_internal_arrays.size();
    size_type size = m_internal_size;
    size_type int_memory = 0;
    std::vector<iterator_pair_type> sequences(num_arrays);
    // collect all IA sequences and their total internal memory footprint
    for (unsigned i = 0; i < num_arrays; ++i)
    {
        sequences[i] = std::make_pair(m_internal_arrays[i].begin(),
                                      m_internal_arrays[i].end());
        int_memory += m_internal_arrays[i].int_memory();
    }
    // must release more RAM in IAs than the EA takes, otherwise: merge
    // external and internal arrays!
    if (int_memory < external_array_type::int_memory(size)
        + ceil(m_num_read_blocks_per_ea) * block_size)
    {
        return merge_external_arrays();
    }
    // construct new external array
    external_array_type ea(size, &m_pool, 0);
    m_stats.max_merge_buffer_size.set_max(size);
    {
        // scope ensures the writer is destroyed (finalizing the EA)
        // before the EA is registered below
        external_array_writer_type external_array_writer(ea);
        potentially_parallel::multiway_merge(
            sequences.begin(), sequences.end(),
            external_array_writer.begin(), size, m_inv_compare);
    }
    STXXL_DEBUG("Merge done of new ea " << &ea);
    m_external_arrays.swap_back(ea);
    m_internal_size = 0;
    m_external_size += size;
    // register EA in min tree
    // important for check_external_level()!
    m_external_min_tree.activate_without_replay(m_external_arrays.size() - 1);
    update_external_min_tree(m_external_arrays.size() - 1);
    // register EA in hint tree
    m_hint_tree.activate_without_replay(m_external_arrays.size() - 1);
    if (!m_in_bulk_push)
        update_hint_tree(m_external_arrays.size() - 1);
    // else: done in bulk_push_end() -> rebuild_hint_tree()
    m_internal_arrays.clear();
    m_stats.num_new_internal_arrays = 0;
    cleanup_internal_arrays();
    // TODO: is this necessary? See cleanup_internal_arrays().
    for (size_t i = 0; i < c_max_internal_levels; ++i)
        m_internal_levels[i] = 0;
    // memory accounting: IA memory freed, new EA memory consumed
    m_mem_left += int_memory;
    m_mem_left -= m_external_arrays.back().int_memory();
    m_stats.max_num_external_arrays.set_max(m_external_arrays.size());
    m_stats.internal_array_flush_time.stop();
    // update EA level and potentially merge
    ++m_external_levels[0];
    check_external_level(0);
    resize_read_pool();
    // Rebuild hint tree completely as the hint sequence may have changed.
    if (!m_in_bulk_push)
        rebuild_hint_tree();
    else
        assert(m_external_arrays.size() - 1 >= m_bulk_first_delayed_external_array);
    check_invariants();
}
// Compares the largest accessible value of two external arrays.
struct s_min_tree_comparator {
const external_arrays_type& m_eas;
const std::vector<unsigned_type>& m_indices;
const inv_compare_type& m_compare;
s_min_tree_comparator(const external_arrays_type& eas,
const inv_compare_type& compare,
const std::vector<unsigned_type>& indices)
: m_eas(eas), m_indices(indices), m_compare(compare) { }
bool operator () (const size_t& a, const size_t& b) const
{
return m_compare(m_eas[m_indices[a]].get_next_hintable_min(),
m_eas[m_indices[b]].get_next_hintable_min());
}
};
//! Merges external arrays if there are too many external arrays on
//! the same level.
//! \param level external level to check
//! \param force_merge_all if true, merge ALL non-empty external arrays
//! (regardless of level) into one array in the highest level; used by
//! merge_external_arrays() to free memory.
void check_external_level(unsigned_type level, bool force_merge_all = false)
{
    if (!force_merge_all)
        STXXL_DEBUG("Checking external level " << level);
    // return if EA level is not full
    if (m_external_levels[level] < c_max_external_level_size && !force_merge_all)
        return;
    unsigned_type level_size = 0;
    size_type int_memory = 0;
    std::vector<unsigned_type> ea_index;
    // collect the non-empty arrays to merge (all of them if forced)
    for (unsigned_type i = 0; i < m_external_arrays.size(); ++i)
    {
        if (m_external_arrays[i].level() != level && !force_merge_all) continue;
        if (m_external_arrays[i].empty()) continue;
        level_size += m_external_arrays[i].size();
        int_memory += m_external_arrays[i].int_memory();
        ea_index.push_back(i);
    }
    // return if there is not enough RAM for the new array.
    // TODO: force_merge_all==true is for freeing memory. Breaking here is not
    // helpful in this case. But one should maybe reserve some space in advance.
    if (m_mem_left < external_array_type::int_memory(level_size) && !force_merge_all)
        return;
    m_mem_left -= external_array_type::int_memory(level_size);
    STXXL_ASSERT(force_merge_all || c_max_external_level_size == ea_index.size());
    // fixed: counters are unsigned_type now to avoid signed/unsigned
    // comparisons against ea_index.size()
    const unsigned_type num_arrays_to_merge = ea_index.size();
    STXXL_DEBUG("merging external arrays" <<
                " level=" << level <<
                " level_size=" << level_size <<
                " sequences=" << num_arrays_to_merge <<
                " force_merge_all=" << force_merge_all);
    // if force_merge_all: create array in highest level to avoid merging
    // of such a large EA.
    const unsigned_type new_level =
        force_merge_all ? c_max_external_levels - 1 : level + 1;
    // construct new external array
    external_array_type ea(level_size, &m_pool, new_level);
    {
        external_array_writer_type external_array_writer(ea);
        typename external_array_writer_type::iterator out_iter
            = external_array_writer.begin();
        // === build minima_tree over the level's arrays ===
        s_min_tree_comparator min_tree_comparator(m_external_arrays,
                                                  m_inv_compare, ea_index);
        winner_tree<s_min_tree_comparator> min_tree(num_arrays_to_merge,
                                                    min_tree_comparator);
        // =================================================
        // merge in rounds: each round re-hints, waits for the hinted
        // blocks of every source EA and merges all accessible items
        unsigned_type num_arrays_done = 0;
        while (num_arrays_to_merge != num_arrays_done)
        {
            STXXL_DEBUG("num_arrays_done = " << num_arrays_done);
            // === build hints ===
            for (unsigned_type i = 0; i < num_arrays_to_merge; ++i) {
                if (m_external_arrays[ea_index[i]].has_unhinted_em_data()) {
                    min_tree.activate_without_replay(i);
                }
                else {
                    min_tree.deactivate_without_replay(i);
                }
            }
            min_tree.rebuild();
            // === fill available memory with read blocks ===
            while (m_mem_left >= block_size) {
                block_type* new_block = new block_type();
                m_pool.add_prefetch(new_block);
                ++m_num_read_blocks;
                m_mem_left -= block_size;
            }
            // ==============================================
            // cleanup hints (all arrays, not only the ones to merge)
            for (unsigned_type i = 0; i < m_external_arrays.size(); ++i) {
                m_external_arrays[i].rebuild_hints_prepare();
            }
            // virtually release all hints
            unsigned_type free_prefetch_blocks =
                m_pool.free_size_prefetch() + m_num_hinted_blocks;
            m_num_hinted_blocks = 0;
            int gmin_index_index; // index in ea_index
            while (free_prefetch_blocks > 0 &&
                   (gmin_index_index = min_tree.top()) >= 0)
            {
                const unsigned_type gmin_index = ea_index[gmin_index_index];
                assert(gmin_index < m_external_arrays.size());
                STXXL_DEBUG0("check_external_level():Give pre-hint in EA[" << gmin_index << "] min " <<
                             m_external_arrays[gmin_index].get_next_hintable_min());
                m_external_arrays[gmin_index].rebuild_hints_prehint_next_block();
                --free_prefetch_blocks;
                ++m_num_hinted_blocks;
                if (m_external_arrays[gmin_index].has_unhinted_em_data()) {
                    min_tree.replay_on_change(gmin_index_index);
                }
                else {
                    min_tree.deactivate_player(gmin_index_index);
                }
            }
            // invalidate all hinted blocks no longer needed
            // (all arrays, not only the ones to merge)
            for (size_t i = 0; i < m_external_arrays.size(); ++i)
                m_external_arrays[i].rebuild_hints_cancel();
            // perform real hinting on pre-hinted blocks
            // (all arrays, not only the ones to merge)
            for (size_t i = 0; i < m_external_arrays.size(); ++i)
                m_external_arrays[i].rebuild_hints_finish();
            assert(free_prefetch_blocks == m_pool.free_size_prefetch());
            // ================================ end build hints ======
            // === wait for data ===
            for (size_type i = 0; i < num_arrays_to_merge; ++i) {
                const unsigned_type index = ea_index[i];
                unsigned_type used_blocks =
                    m_external_arrays[index].wait_all_hinted_blocks();
                m_num_hinted_blocks -= used_blocks;
                m_num_used_read_blocks += used_blocks;
            }
            // =====================
            // === build sequences ===
            std::vector<iterator_pair_type> sequences(num_arrays_to_merge);
            std::vector<size_type> sizes(num_arrays_to_merge);
            gmin_index_index = min_tree.top();
            bool needs_limit = (gmin_index_index >= 0);
            for (size_type i = 0; i < num_arrays_to_merge; ++i) {
                const unsigned_type index = ea_index[i];
                iterator begin = m_external_arrays[index].begin();
                iterator end = m_external_arrays[index].end();
                if (needs_limit) {
                    // only merge up to the smallest minimum of the
                    // not-yet-loaded blocks
                    const unsigned_type gmin_index = ea_index[gmin_index_index];
                    const value_type& gmin_value =
                        m_external_arrays[gmin_index].get_next_block_min();
                    end = std::lower_bound(begin, end,
                                           gmin_value, m_inv_compare);
                }
                sizes[i] = std::distance(begin, end);
                sequences[i] = std::make_pair(begin, end);
                STXXL_DEBUG("sequence[" << i << "] ea " <<
                            begin << " - " << end <<
                            " size " << sizes[i] <<
                            (needs_limit ? " with ub limit" : ""));
            }
            // ==========================================
            // === merge ===
            // fixed: sum in size_type; an int initializer would truncate
            // totals beyond 2^31 elements
            size_type output_size = std::accumulate(sizes.begin(), sizes.end(),
                                                    size_type(0));
            out_iter = potentially_parallel::multiway_merge(
                sequences.begin(), sequences.end(),
                out_iter, output_size, m_inv_compare);
            for (unsigned_type i = 0; i < num_arrays_to_merge; ++i) {
                const unsigned_type index = ea_index[i];
                if (!m_external_arrays[index].empty()) {
                    // remove items and free blocks in RAM.
                    unsigned_type freed_blocks =
                        m_external_arrays[index].remove_items(sizes[i]);
                    m_num_used_read_blocks -= freed_blocks;
                    if (m_external_arrays[index].empty())
                        ++num_arrays_done;
                }
            }
            // reset read buffer
            resize_read_pool();
            // cannot call clear_external_arrays() here, since it
            // corrupts ea_index.
        }
        if (m_in_bulk_push)
            m_bulk_first_delayed_external_array = 0; // TODO: workaround
    } // destroy external_array_writer
    // clean up now empty arrays
    cleanup_external_arrays();
    m_external_arrays.swap_back(ea);
    ++m_external_levels[new_level];
    // register EA in min tree
    m_external_min_tree.activate_without_replay(m_external_arrays.size() - 1);
    update_external_min_tree(m_external_arrays.size() - 1);
    // register EA in hint tree
    m_hint_tree.activate_without_replay(m_external_arrays.size() - 1);
    if (!m_in_bulk_push)
        update_hint_tree(m_external_arrays.size() - 1);
    // else: done in bulk_push_end() -> rebuild_hint_tree()
    STXXL_DEBUG("Merge done of new ea " << &ea);
    if (!force_merge_all)
        check_external_level(level + 1);
    check_invariants();
}
//! Add new internal array, which requires that values are sorted!
//! automatically decreases m_mem_left! also merges internal arrays if
//! there are too many internal arrays on the same level.
//! \param values sorted vector adopted (swapped) into the new internal
//! array; do not use it afterwards
//! \param used number of elements at the front of values that are
//! skipped as already consumed (see convert_eb_into_ia())
//! \param level internal level the new array is placed on
void add_as_internal_array(std::vector<value_type>& values,
                           unsigned_type used = 0,
                           unsigned_type level = 0)
{
    const size_t size = values.size();
    const size_t capacity = values.capacity();
    assert(size > used); // at least one element
    internal_array_type new_array(values, used, level);
    STXXL_ASSERT(new_array.int_memory() ==
                 internal_array_type::int_memory(capacity));
    m_internal_arrays.swap_back(new_array);
    if (!extract_buffer_empty()) {
        m_stats.num_new_internal_arrays++;
        m_stats.max_num_new_internal_arrays.set_max(
            m_stats.num_new_internal_arrays);
        m_minima.add_internal_array(
            static_cast<unsigned>(m_internal_arrays.size()) - 1
            );
    }
    m_internal_size += size - used;
    // account for the full vector capacity, not just its size
    m_mem_left -= internal_array_type::int_memory(capacity);
    STXXL_CHECK(level < c_max_internal_levels &&
                "Internal array level is larger than anything possible "
                "in this universe. Increase the size of m_internal_levels");
    ++m_internal_levels[level];
    m_stats.max_num_internal_arrays.set_max(m_internal_arrays.size());
    // if IA level is too large ...
    if (m_internal_levels[level] < c_max_internal_level_size) return;
    unsigned_type level_size = 0;
    size_type int_memory = 0;
    std::vector<iterator_pair_type> sequences;
    std::vector<unsigned_type> ia_index;
    // collect all non-empty internal arrays of this level for merging
    for (unsigned_type i = 0; i < m_internal_arrays.size(); ++i)
    {
        if (m_internal_arrays[i].level() != level) continue;
        if (m_internal_arrays[i].empty()) continue;
        level_size += m_internal_arrays[i].size();
        int_memory += m_internal_arrays[i].int_memory();
        sequences.push_back(std::make_pair(m_internal_arrays[i].begin(),
                                           m_internal_arrays[i].end()));
        ia_index.push_back(i);
    }
    // AND there is enough RAM to merge it (without flushing out to EA).
    if (m_mem_left < internal_array_type::int_memory(level_size)) return;
    // must free up more memory than the new array needs.
    STXXL_ASSERT(int_memory >= internal_array_type::int_memory(level_size));
    STXXL_DEBUG("merging internal arrays" <<
                " level=" << level <<
                " level_size=" << level_size <<
                " sequences=" << sequences.size());
    std::vector<value_type> merged_array(level_size);
    potentially_parallel::multiway_merge(
        sequences.begin(), sequences.end(),
        merged_array.begin(), level_size, m_inv_compare);
    // release memory of old internal arrays immediately
    for (unsigned_type i = 0; i < ia_index.size(); ++i)
    {
        unsigned_type ia = ia_index[i];
        m_internal_arrays[ia].make_empty();
        // this is done in cleanup_internal_arrays()...
        //if (ia < m_minima.ia_slots())
        //    m_minima.deactivate_internal_array(ia);
    }
    cleanup_internal_arrays();
    // in add_as_internal_array the level_size is re-added!
    m_internal_size -= level_size;
    // add as new internal array at next level (and maybe recursively merge)
    add_as_internal_array(merged_array, 0, level + 1);
}
/*!
* Sorts the values from values and writes them into an internal array.
* Don't use the value vector afterwards!
*
* \param values the vector to sort and store
*/
void flush_array_internal(std::vector<value_type>& values)
{
potentially_parallel::sort(values.begin(), values.end(), m_inv_compare);
// flush until enough memory for new array
flush_ia_ea_until_memory_free(
internal_array_type::int_memory(values.size())
);
add_as_internal_array(values);
}
//! Struct of all statistical counters and timers. Turn on/off statistics
//! using the stats_counter and stats_timer typedefs.
struct stats_type
{
    //! Largest number of elements in the extract buffer at the same time
    stats_counter max_extract_buffer_size;
    //! Sum of the sizes of each extract buffer refill. Used for average
    //! size.
    stats_counter total_extract_buffer_size;
    //! Largest number of elements in the merge buffer when running
    //! flush_internal_arrays()
    stats_counter max_merge_buffer_size;
    //! Total number of extracts
    stats_counter num_extracts;
    //! Number of refill_extract_buffer() calls
    stats_counter num_extract_buffer_refills;
    //! Number of flush_insertion_heaps() calls
    stats_counter num_insertion_heap_flushes;
    //! Number of flush_directly_to_hd() calls
    stats_counter num_direct_flushes;
    //! Number of flush_internal_arrays() calls
    stats_counter num_internal_array_flushes;
    //! Number of merge_external_arrays() calls
    stats_counter num_external_array_merges;
    //! Largest number of internal arrays at the same time
    stats_counter max_num_internal_arrays;
    //! Largest number of external arrays at the same time
    stats_counter max_num_external_arrays;
    //! Temporary number of new external arrays at the same time (which
    //! were created while the extract buffer hadn't been empty)
    stats_counter num_new_external_arrays;
    //! Largest number of new external arrays at the same time (which were
    //! created while the extract buffer hadn't been empty)
    stats_counter max_num_new_external_arrays;
    //! Temporary number of new internal arrays at the same time (which
    //! were created while the extract buffer hadn't been empty)
    stats_counter num_new_internal_arrays;
    //! Largest number of new internal arrays at the same time (which were
    //! created while the extract buffer hadn't been empty)
    stats_counter max_num_new_internal_arrays;
    //! Total time for flush_insertion_heaps()
    stats_timer insertion_heap_flush_time;
    //! Total time for flush_directly_to_hd()
    stats_timer direct_flush_time;
    //! Total time for flush_internal_arrays()
    stats_timer internal_array_flush_time;
    //! Total time for merge_external_arrays()
    stats_timer external_array_merge_time;
    //! Total time for extract_min()
    stats_timer extract_min_time;
    //! Total time for refill_extract_buffer()
    stats_timer refill_extract_buffer_time;
    //! Total time for the merging in refill_extract_buffer()
    //! Part of refill_extract_buffer_time.
    stats_timer refill_merge_time;
    //! Total time for all things before merging in refill_extract_buffer()
    //! Part of refill_extract_buffer_time.
    stats_timer refill_time_before_merge;
    //! Total time for all things after merging in refill_extract_buffer()
    //! Part of refill_extract_buffer_time.
    stats_timer refill_time_after_merge;
    //! Total time of wait() calls in first part of
    //! refill_extract_buffer(). Part of refill_time_before_merge and
    //! refill_extract_buffer_time.
    stats_timer refill_wait_time;
    //! Total time for pop_heap() in extract_min().
    //! Part of extract_min_time.
    stats_timer pop_heap_time;
    //! Total time for merging the sorted heaps.
    //! Part of flush_insertion_heaps.
    stats_timer merge_sorted_heaps_time;
    //! Total time for std::lower_bound calls in refill_extract_buffer()
    //! Part of refill_extract_buffer_time and refill_time_before_merge.
    //! (Intentionally disabled; kept for reference.)
    // stats_timer refill_lower_bound_time;
    //! Total time for std::accumulate calls in refill_extract_buffer()
    //! Part of refill_extract_buffer_time and refill_time_before_merge.
    stats_timer refill_accumulate_time;
    //! Total time for determining the smallest max value in refill_extract_buffer()
    //! Part of refill_extract_buffer_time and refill_time_before_merge.
    stats_timer refill_minmax_time;
    //! Total time spent in hint processing — NOTE(review): what hint()
    //! covers is not visible here; confirm against the hint routines.
    stats_timer hint_time;

    //! Dumps every counter and timer, one "name=value" per line. The
    //! first three counters are printed as memory amounts in units of
    //! sizeof(value_type).
    friend std::ostream& operator << (std::ostream& os, const stats_type& o)
    {
        return os << "max_extract_buffer_size=" << o.max_extract_buffer_size.as_memory_amount(sizeof(value_type)) << std::endl
                  << "total_extract_buffer_size=" << o.total_extract_buffer_size.as_memory_amount(sizeof(value_type)) << std::endl
                  << "max_merge_buffer_size=" << o.max_merge_buffer_size.as_memory_amount(sizeof(value_type)) << std::endl
                  << "num_extracts=" << o.num_extracts << std::endl
                  << "num_extract_buffer_refills=" << o.num_extract_buffer_refills << std::endl
                  << "num_insertion_heap_flushes=" << o.num_insertion_heap_flushes << std::endl
                  << "num_direct_flushes=" << o.num_direct_flushes << std::endl
                  << "num_internal_array_flushes=" << o.num_internal_array_flushes << std::endl
                  << "num_external_array_merges=" << o.num_external_array_merges << std::endl
                  << "max_num_internal_arrays=" << o.max_num_internal_arrays << std::endl
                  << "max_num_external_arrays=" << o.max_num_external_arrays << std::endl
                  << "num_new_external_arrays=" << o.num_new_external_arrays << std::endl
                  << "max_num_new_external_arrays=" << o.max_num_new_external_arrays << std::endl
                  << "num_new_internal_arrays=" << o.num_new_internal_arrays << std::endl
                  << "max_num_new_internal_arrays=" << o.max_num_new_internal_arrays << std::endl
                  << "insertion_heap_flush_time=" << o.insertion_heap_flush_time << std::endl
                  << "direct_flush_time=" << o.direct_flush_time << std::endl
                  << "internal_array_flush_time=" << o.internal_array_flush_time << std::endl
                  << "external_array_merge_time=" << o.external_array_merge_time << std::endl
                  << "extract_min_time=" << o.extract_min_time << std::endl
                  << "refill_extract_buffer_time=" << o.refill_extract_buffer_time << std::endl
                  << "refill_merge_time=" << o.refill_merge_time << std::endl
                  << "refill_time_before_merge=" << o.refill_time_before_merge << std::endl
                  << "refill_time_after_merge=" << o.refill_time_after_merge << std::endl
                  << "refill_wait_time=" << o.refill_wait_time << std::endl
                  << "pop_heap_time=" << o.pop_heap_time << std::endl
                  << "merge_sorted_heaps_time=" << o.merge_sorted_heaps_time << std::endl
                  // << "refill_lower_bound_time=" << o.refill_lower_bound_time << std::endl
                  << "refill_accumulate_time=" << o.refill_accumulate_time << std::endl
                  << "refill_minmax_time=" << o.refill_minmax_time << std::endl
                  << "hint_time=" << o.hint_time << std::endl;
    }
};
//! Runtime statistics collected for this priority queue instance.
stats_type m_stats;
};
// For C++98 compatibility:
// Out-of-class definition of the static data member declared in
// parallel_priority_queue. In-class initializers for non-integral
// static const members are a C++11 feature, so the 5% default RAM
// share for the extract buffer is defined here instead.
template <
    class ValueType,
    class CompareType,
    class AllocStrategy,
    uint64 BlockSize,
    uint64 DefaultMemSize,
    uint64 MaxItems
    >
const double parallel_priority_queue<ValueType, CompareType, AllocStrategy, BlockSize,
                                     DefaultMemSize, MaxItems>::c_default_extract_buffer_ram_part = 0.05;
STXXL_END_NAMESPACE
#endif // !STXXL_CONTAINERS_PARALLEL_PRIORITY_QUEUE_HEADER
|
computeGraph.c | #include "defs.h"
/* Builds the CSR adjacency structure of G from the raw edge tuples in
 * SDGdata, in parallel when compiled with OpenMP.
 *
 * Three passes: (1) count per-vertex out-degrees, recording each
 * edge's rank within its source's adjacency list (lock-protected under
 * OpenMP); (2) prefix-sum the degrees into the CSR row offsets
 * numEdges; (3) scatter end vertices and weights into their final
 * slots. The SDGdata edge arrays are freed before returning.
 *
 * Fix: removed a leftover debug fprintf that dumped every edge weight
 * to stderr inside the parallel scatter loop (and used "%d" for
 * WEIGHT_T, which is wrong if WEIGHT_T is not int). Also dropped an
 * empty #ifdef _OPENMP/#endif pair that had no effect.
 *
 * Assumes N, M and NUM_THREADS come from defs.h.
 * Returns the elapsed wall-clock time in seconds. */
double computeGraph(graph* G, graphSDG* SDGdata) {

    VERT_T* endV;
    LONG_T *degree, *numEdges, *pos, *pSums;
    WEIGHT_T* w;
    double elapsed_time;

#ifdef _OPENMP
    omp_lock_t *vLock;
    LONG_T chunkSize;
#endif

    elapsed_time = get_seconds();

#ifdef _OPENMP
    omp_set_num_threads(NUM_THREADS);
#endif

#ifdef _OPENMP
#pragma omp parallel
#endif
    {
        LONG_T i, j, u, n, m, tid, nthreads;

#ifdef DIAGNOSTIC
        double elapsed_time_part;
#endif

#ifdef _OPENMP
        nthreads = omp_get_num_threads();
        tid = omp_get_thread_num();
#else
        tid = 0;
        nthreads = 1;
#endif

        n = N;
        m = M;

        /* Thread 0 allocates the shared scratch arrays; the barriers
         * below publish them before any other thread touches them. */
        if (tid == 0) {
#ifdef _OPENMP
            vLock = (omp_lock_t *) malloc(n*sizeof(omp_lock_t));
            assert(vLock != NULL);
            /* NOTE(review): chunkSize is 0 when n < nthreads — confirm
             * the OpenMP runtime accepts a zero chunk size. */
            chunkSize = n/nthreads;
#endif
            pos = (LONG_T *) malloc(m*sizeof(LONG_T));
            assert(pos != NULL);
            degree = (LONG_T *) calloc(n, sizeof(LONG_T));
            assert(degree != NULL);
        }

#ifdef DIAGNOSTIC
        if (tid == 0) {
            elapsed_time_part = get_seconds();
        }
#endif

#ifdef _OPENMP
#pragma omp barrier
        /* One lock per vertex serializes concurrent degree updates. */
#pragma omp for schedule(static, chunkSize)
        for (i=0; i<n; i++) {
            omp_init_lock(&vLock[i]);
        }
#pragma omp barrier

#ifdef DIAGNOSTIC
        if (tid == 0) {
            elapsed_time_part = get_seconds() - elapsed_time_part;
            fprintf(stderr, "Lock initialization time: %lf seconds\n",
                    elapsed_time_part);
            elapsed_time_part = get_seconds();
        }
#endif

#pragma omp for
#endif
        /* Pass 1: count out-degrees; pos[i] is edge i's rank within
         * its source vertex's adjacency list. */
        for (i=0; i<m; i++) {
            u = SDGdata->startVertex[i];
#ifdef _OPENMP
            omp_set_lock(&vLock[u]);
#endif
            pos[i] = degree[u]++;
#ifdef _OPENMP
            omp_unset_lock(&vLock[u]);
#endif
        }

#ifdef DIAGNOSTIC
        if (tid == 0) {
            elapsed_time_part = get_seconds() - elapsed_time_part;
            fprintf(stderr, "Degree computation time: %lf seconds\n",
                    elapsed_time_part);
            elapsed_time_part = get_seconds();
        }
#endif

#ifdef _OPENMP
#pragma omp barrier
#pragma omp for schedule(static, chunkSize)
        for (i=0; i<n; i++) {
            omp_destroy_lock(&vLock[i]);
        }

        if (tid == 0)
            free(vLock);
#endif

#ifdef DIAGNOSTIC
        if (tid == 0) {
            elapsed_time_part = get_seconds() - elapsed_time_part;
            fprintf(stderr, "Lock destruction time: %lf seconds\n",
                    elapsed_time_part);
            elapsed_time_part = get_seconds();
        }
#endif

        if (tid == 0) {
            numEdges = (LONG_T *) malloc((n+1)*sizeof(LONG_T));
            pSums = (LONG_T *) malloc(nthreads*sizeof(LONG_T));
        }

#ifdef _OPENMP
#pragma omp barrier
#endif

        /* Pass 2: prefix sum of degrees gives the CSR row offsets
         * (all threads participate via the helper). */
        prefix_sums(degree, numEdges, pSums, n);

#ifdef DIAGNOSTIC
        if (tid == 0) {
            elapsed_time_part = get_seconds() - elapsed_time_part;
            fprintf(stderr, "Prefix sums time: %lf seconds\n",
                    elapsed_time_part);
            elapsed_time_part = get_seconds();
        }
#endif

#ifdef _OPENMP
#pragma omp barrier
#endif

        if (tid == 0) {
            free(degree);
            free(pSums);
            w = (WEIGHT_T *) malloc(m*sizeof(WEIGHT_T));
            endV = (VERT_T *) malloc(m* sizeof(VERT_T));
        }

#ifdef _OPENMP
#pragma omp barrier
#pragma omp for
#endif
        /* Pass 3: scatter each edge into its final CSR slot. Slot j is
         * unique per edge, so no locking is needed here. */
        for (i=0; i<m; i++) {
            u = SDGdata->startVertex[i];
            j = numEdges[u] + pos[i];
            endV[j] = SDGdata->endVertex[i];
            /* TODO: propagate the real weight once verified:
             * w[j] = SDGdata->weight[i]; */
            w[j] = 1;
        }

#ifdef DIAGNOSTIC
        if (tid == 0) {
            elapsed_time_part = get_seconds() - elapsed_time_part;
            fprintf(stderr, "Edge data structure construction time: %lf seconds\n",
                    elapsed_time_part);
            elapsed_time_part = get_seconds();
        }
#endif

        if (tid == 0) {
            free(pos);
            G->n = n;
            G->m = m;
            G->numEdges = numEdges;
            G->endV = endV;
            G->weight = w;
        }
    }

    /* Verification */
#if 0
    fprintf(stderr, "SDG data:\n");
    for (int i=0; i<SDGdata->m; i++) {
        fprintf(stderr, "[%ld %ld %ld] ", SDGdata->startVertex[i],
                SDGdata->endVertex[i], SDGdata->weight[i]);
    }
    fprintf(stderr, "\n");
    for (int i=0; i<G->n + 1; i++) {
        fprintf(stderr, "[%ld] ", G->numEdges[i]);
    }
    fprintf(stderr, "\nGraph:\n");
    for (int i=0; i<G->n; i++) {
        for (int j=G->numEdges[i]; j<G->numEdges[i+1]; j++) {
            fprintf(stderr, "[%ld %ld %ld] ", i, G->endV[j], G->weight[j]);
        }
    }
#endif

    /* The raw edge tuples are no longer needed once CSR is built. */
    free(SDGdata->startVertex);
    free(SDGdata->endVertex);
    free(SDGdata->weight);

    elapsed_time = get_seconds() - elapsed_time;
    return elapsed_time;
}
|
DeclOpenMP.h | //===- DeclOpenMP.h - Classes for representing OpenMP directives -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines OpenMP nodes for declarative directives.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_DECLOPENMP_H
#define LLVM_CLANG_AST_DECLOPENMP_H
#include "clang/AST/Decl.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Type.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/TrailingObjects.h"
namespace clang {
/// This represents '#pragma omp threadprivate ...' directive.
/// For example, in the following, both 'a' and 'A::b' are threadprivate:
///
/// \code
/// int a;
/// #pragma omp threadprivate(a)
/// struct A {
/// static int b;
/// #pragma omp threadprivate(b)
/// };
/// \endcode
///
class OMPThreadPrivateDecl final
    : public Decl,
      private llvm::TrailingObjects<OMPThreadPrivateDecl, Expr *> {
  friend class ASTDeclReader;
  friend TrailingObjects;

  /// Number of variable reference expressions stored as trailing objects.
  unsigned NumVars;

  virtual void anchor();

  OMPThreadPrivateDecl(Kind DK, DeclContext *DC, SourceLocation L) :
    Decl(DK, DC, L), NumVars(0) { }

  /// Immutable view of the threadprivate variable expressions.
  ArrayRef<const Expr *> getVars() const {
    return llvm::makeArrayRef(getTrailingObjects<Expr *>(), NumVars);
  }

  /// Mutable view of the threadprivate variable expressions.
  MutableArrayRef<Expr *> getVars() {
    return MutableArrayRef<Expr *>(getTrailingObjects<Expr *>(), NumVars);
  }

  /// Copies \p VL into the trailing storage (sizes must match).
  void setVars(ArrayRef<Expr *> VL);

public:
  /// Create a threadprivate node referencing the variables in \p VL.
  static OMPThreadPrivateDecl *Create(ASTContext &C, DeclContext *DC,
                                      SourceLocation L,
                                      ArrayRef<Expr *> VL);
  /// Create an empty node for deserialization; vars are filled in later.
  static OMPThreadPrivateDecl *CreateDeserialized(ASTContext &C,
                                                  unsigned ID, unsigned N);

  typedef MutableArrayRef<Expr *>::iterator varlist_iterator;
  typedef ArrayRef<const Expr *>::iterator varlist_const_iterator;
  typedef llvm::iterator_range<varlist_iterator> varlist_range;
  typedef llvm::iterator_range<varlist_const_iterator> varlist_const_range;

  unsigned varlist_size() const { return NumVars; }
  bool varlist_empty() const { return NumVars == 0; }

  varlist_range varlists() {
    return varlist_range(varlist_begin(), varlist_end());
  }
  varlist_const_range varlists() const {
    return varlist_const_range(varlist_begin(), varlist_end());
  }
  varlist_iterator varlist_begin() { return getVars().begin(); }
  varlist_iterator varlist_end() { return getVars().end(); }
  varlist_const_iterator varlist_begin() const { return getVars().begin(); }
  varlist_const_iterator varlist_end() const { return getVars().end(); }

  static bool classof(const Decl *D) { return classofKind(D->getKind()); }
  static bool classofKind(Kind K) { return K == OMPThreadPrivate; }
};
/// This represents '#pragma omp declare reduction ...' directive.
/// For example, in the following, declared reduction 'foo' for types 'int' and
/// 'float':
///
/// \code
/// #pragma omp declare reduction (foo : int,float : omp_out += omp_in) \
/// initializer (omp_priv = 0)
/// \endcode
///
/// Here 'omp_out += omp_in' is a combiner and 'omp_priv = 0' is an initializer.
class OMPDeclareReductionDecl final : public ValueDecl, public DeclContext {
  // This class stores some data in DeclContext::OMPDeclareReductionDeclBits
  // to save some space. Use the provided accessors to access it.
public:
  enum InitKind {
    CallInit,   // Initialized by function call.
    DirectInit, // omp_priv(<expr>)
    CopyInit    // omp_priv = <expr>
  };

private:
  friend class ASTDeclReader;
  /// Combiner for declare reduction construct.
  Expr *Combiner = nullptr;
  /// Initializer for declare reduction construct.
  Expr *Initializer = nullptr;
  /// In parameter of the combiner.
  Expr *In = nullptr;
  /// Out parameter of the combiner.
  Expr *Out = nullptr;
  /// Priv parameter of the initializer.
  Expr *Priv = nullptr;
  /// Orig parameter of the initializer.
  Expr *Orig = nullptr;

  /// Reference to the previous declare reduction construct in the same
  /// scope with the same name. Required for proper templates instantiation if
  /// the declare reduction construct is declared inside compound statement.
  LazyDeclPtr PrevDeclInScope;

  virtual void anchor();

  OMPDeclareReductionDecl(Kind DK, DeclContext *DC, SourceLocation L,
                          DeclarationName Name, QualType Ty,
                          OMPDeclareReductionDecl *PrevDeclInScope);

  /// Link this node to the previous same-named construct in scope.
  void setPrevDeclInScope(OMPDeclareReductionDecl *Prev) {
    PrevDeclInScope = Prev;
  }

public:
  /// Create declare reduction node.
  static OMPDeclareReductionDecl *
  Create(ASTContext &C, DeclContext *DC, SourceLocation L, DeclarationName Name,
         QualType T, OMPDeclareReductionDecl *PrevDeclInScope);
  /// Create deserialized declare reduction node.
  static OMPDeclareReductionDecl *CreateDeserialized(ASTContext &C,
                                                     unsigned ID);

  /// Get combiner expression of the declare reduction construct.
  Expr *getCombiner() { return Combiner; }
  const Expr *getCombiner() const { return Combiner; }
  /// Get In variable of the combiner.
  Expr *getCombinerIn() { return In; }
  const Expr *getCombinerIn() const { return In; }
  /// Get Out variable of the combiner.
  Expr *getCombinerOut() { return Out; }
  const Expr *getCombinerOut() const { return Out; }
  /// Set combiner expression for the declare reduction construct.
  void setCombiner(Expr *E) { Combiner = E; }
  /// Set combiner In and Out vars.
  void setCombinerData(Expr *InE, Expr *OutE) {
    In = InE;
    Out = OutE;
  }
  /// Get initializer expression (if specified) of the declare reduction
  /// construct.
  Expr *getInitializer() { return Initializer; }
  const Expr *getInitializer() const { return Initializer; }
  /// Get initializer kind.
  InitKind getInitializerKind() const {
    return static_cast<InitKind>(OMPDeclareReductionDeclBits.InitializerKind);
  }
  /// Get Orig variable of the initializer.
  Expr *getInitOrig() { return Orig; }
  const Expr *getInitOrig() const { return Orig; }
  /// Get Priv variable of the initializer.
  Expr *getInitPriv() { return Priv; }
  const Expr *getInitPriv() const { return Priv; }
  /// Set initializer expression for the declare reduction construct.
  void setInitializer(Expr *E, InitKind IK) {
    Initializer = E;
    OMPDeclareReductionDeclBits.InitializerKind = IK;
  }
  /// Set initializer Orig and Priv vars.
  void setInitializerData(Expr *OrigE, Expr *PrivE) {
    Orig = OrigE;
    Priv = PrivE;
  }
  /// Get reference to previous declare reduction construct in the same
  /// scope with the same name.
  OMPDeclareReductionDecl *getPrevDeclInScope();
  const OMPDeclareReductionDecl *getPrevDeclInScope() const;

  static bool classof(const Decl *D) { return classofKind(D->getKind()); }
  static bool classofKind(Kind K) { return K == OMPDeclareReduction; }
  /// Upcast helper for the DeclContext side of this multiply-derived node.
  static DeclContext *castToDeclContext(const OMPDeclareReductionDecl *D) {
    return static_cast<DeclContext *>(const_cast<OMPDeclareReductionDecl *>(D));
  }
  /// Downcast helper from a DeclContext known to be a declare reduction.
  static OMPDeclareReductionDecl *castFromDeclContext(const DeclContext *DC) {
    return static_cast<OMPDeclareReductionDecl *>(
        const_cast<DeclContext *>(DC));
  }
};
/// Pseudo declaration for capturing expressions. Also is used for capturing of
/// non-static data members in non-static member functions.
///
/// Clang supports capturing of variables only, but OpenMP 4.5 allows to
/// privatize non-static members of current class in non-static member
/// functions. This pseudo-declaration allows properly handle this kind of
/// capture by wrapping captured expression into a variable-like declaration.
class OMPCapturedExprDecl final : public VarDecl {
  friend class ASTDeclReader;
  void anchor() override;

  /// Build the declaration; always marked implicit, since a captured
  /// expression has no spelling of its own in the source.
  OMPCapturedExprDecl(ASTContext &C, DeclContext *DC, IdentifierInfo *Id,
                      QualType Type, TypeSourceInfo *TInfo,
                      SourceLocation StartLoc)
      : VarDecl(OMPCapturedExpr, C, DC, StartLoc, StartLoc, Id, Type, TInfo,
                SC_None) {
    setImplicit();
  }

public:
  /// Create a pseudo-declaration wrapping a captured expression.
  static OMPCapturedExprDecl *Create(ASTContext &C, DeclContext *DC,
                                     IdentifierInfo *Id, QualType T,
                                     SourceLocation StartLoc);

  /// Create an empty node for deserialization.
  static OMPCapturedExprDecl *CreateDeserialized(ASTContext &C, unsigned ID);

  SourceRange getSourceRange() const override LLVM_READONLY;

  // Implement isa/cast/dyncast/etc.
  static bool classof(const Decl *D) { return classofKind(D->getKind()); }
  static bool classofKind(Kind K) { return K == OMPCapturedExpr; }
};
/// This represents '#pragma omp requires...' directive.
/// For example
///
/// \code
/// #pragma omp requires unified_address
/// \endcode
///
class OMPRequiresDecl final
    : public Decl,
      private llvm::TrailingObjects<OMPRequiresDecl, OMPClause *> {
  friend class ASTDeclReader;
  friend TrailingObjects;

  // Number of clauses associated with this requires declaration
  unsigned NumClauses = 0;

  virtual void anchor();

  OMPRequiresDecl(Kind DK, DeclContext *DC, SourceLocation L)
      : Decl(DK, DC, L), NumClauses(0) {}

  /// Returns an array of immutable clauses associated with this requires
  /// declaration
  ArrayRef<const OMPClause *> getClauses() const {
    return llvm::makeArrayRef(getTrailingObjects<OMPClause *>(), NumClauses);
  }

  /// Returns an array of clauses associated with this requires declaration
  MutableArrayRef<OMPClause *> getClauses() {
    return MutableArrayRef<OMPClause *>(getTrailingObjects<OMPClause *>(),
                                        NumClauses);
  }

  /// Sets an array of clauses to this requires declaration
  void setClauses(ArrayRef<OMPClause *> CL);

public:
  /// Create requires node.
  static OMPRequiresDecl *Create(ASTContext &C, DeclContext *DC,
                                 SourceLocation L, ArrayRef<OMPClause *> CL);
  /// Create deserialized requires node.
  static OMPRequiresDecl *CreateDeserialized(ASTContext &C, unsigned ID,
                                             unsigned N);

  using clauselist_iterator = MutableArrayRef<OMPClause *>::iterator;
  using clauselist_const_iterator = ArrayRef<const OMPClause *>::iterator;
  using clauselist_range = llvm::iterator_range<clauselist_iterator>;
  using clauselist_const_range = llvm::iterator_range<clauselist_const_iterator>;

  unsigned clauselist_size() const { return NumClauses; }
  bool clauselist_empty() const { return NumClauses == 0; }

  clauselist_range clauselists() {
    return clauselist_range(clauselist_begin(), clauselist_end());
  }
  clauselist_const_range clauselists() const {
    return clauselist_const_range(clauselist_begin(), clauselist_end());
  }
  clauselist_iterator clauselist_begin() { return getClauses().begin(); }
  clauselist_iterator clauselist_end() { return getClauses().end(); }
  clauselist_const_iterator clauselist_begin() const {
    return getClauses().begin();
  }
  clauselist_const_iterator clauselist_end() const {
    return getClauses().end();
  }

  static bool classof(const Decl *D) { return classofKind(D->getKind()); }
  static bool classofKind(Kind K) { return K == OMPRequires; }
};
} // end namespace clang
#endif
|
arrayStack.c | // -----------------------------------------------------------------------------
//
// "00_AccelGraph"
//
// -----------------------------------------------------------------------------
// Copyright (c) 2014-2019 All rights reserved
// -----------------------------------------------------------------------------
// Author : Abdullah Mughrabi
// Email : atmughra@ncsu.edu||atmughrabi@gmail.com
// File : arrayStack.c
// Create : 2019-06-21 17:15:17
// Revise : 2019-09-28 15:36:13
// Editor : Abdullah Mughrabi
// -----------------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <omp.h>
#include "myMalloc.h"
#include "arrayStack.h"
#include "bitmap.h"
/* Allocates an ArrayStack holding up to `size` entries, with an empty
 * window (head == tail == tail_next == 0) and two zeroed membership
 * bitmaps (current and next window). */
struct ArrayStack *newArrayStack(uint32_t size)
{
    struct ArrayStack *stack =
        (struct ArrayStack *) my_malloc(sizeof(struct ArrayStack));

    /* All window cursors start at the beginning of the buffer. */
    stack->head = 0;
    stack->tail = 0;
    stack->tail_next = 0;

    /* Backing array and per-element bitmaps sized to `size`. */
    stack->size = size;
    stack->Stack = (uint32_t *) my_malloc(size * sizeof(uint32_t));
    stack->q_bitmap = newBitmap(size);
    stack->q_bitmap_next = newBitmap(size);

    return stack;
}
/* Empties the stack: collapses the window back to index 0 and clears
 * the current-window bitmap. Note the next-window bitmap is left
 * untouched here. */
void resetArrayStack(struct ArrayStack *q)
{
    q->tail_next = 0;
    q->tail = 0;
    q->head = 0;
    clearBitmap(q->q_bitmap);
}
/* Releases the stack and everything it owns. Safe to call with NULL. */
void freeArrayStack(struct ArrayStack *q)
{
    if(!q)
        return;

    /* Free members in reverse order of allocation. */
    if(q->q_bitmap_next)
        freeBitmap(q->q_bitmap_next);
    if(q->q_bitmap)
        freeBitmap(q->q_bitmap);
    if(q->Stack)
        free(q->Stack);

    free(q);
}
/* Pushes k at the tail with wrap-around modulo size, keeping the
 * delayed cursor (tail_next) in sync with tail. */
void pushArrayStack (struct ArrayStack *q, uint32_t k)
{
    uint32_t slot = q->tail;
    q->Stack[slot] = k;
    q->tail = (slot + 1) % q->size;
    q->tail_next = q->tail;
}
/* Pushes k at the current tail and marks it in the current-window
 * bitmap; tail is then re-synced to the delayed cursor and both are
 * advanced together. (No wrap-around, unlike pushArrayStack.) */
void pushArrayStackWithBitmap (struct ArrayStack *q, uint32_t k)
{
    q->Stack[q->tail] = k;
    setBit(q->q_bitmap, k);
    q->tail = q->tail_next + 1;
    q->tail_next = q->tail;
}
/* Thread-safe push: atomically claims the next tail slot so concurrent
 * pushers never collide, then stores k there. */
void pushArrayStackAtomic (struct ArrayStack *q, uint32_t k)
{
    uint32_t slot = __sync_fetch_and_add(&q->tail, 1);
    q->Stack[slot] = k;
}
/* Thread-safe push that also flags k in the current-window bitmap
 * (atomic bit set, so concurrent writers are safe). */
void pushArrayStackWithBitmapAtomic (struct ArrayStack *q, uint32_t k)
{
    uint32_t slot = __sync_fetch_and_add(&q->tail, 1);
    q->Stack[slot] = k;
    setBitAtomic(q->q_bitmap, k);
}
/* Appends k to the delayed region; it only becomes part of the current
 * window after a slideWindowArrayStack() call promotes tail_next. */
void pushArrayStackDelayed (struct ArrayStack *q, uint32_t k)
{
    q->Stack[q->tail_next++] = k;
}
/* Appends k to the delayed region and marks it in the *next*-window
 * bitmap (q_bitmap_next), matching the delayed semantics. */
void pushArrayStackDelayedWithBitmap (struct ArrayStack *q, uint32_t k)
{
    uint32_t slot = q->tail_next;
    q->Stack[slot] = k;
    setBit(q->q_bitmap_next, k);
    q->tail_next = slot + 1;
}
/* Thread-safe delayed push: atomically reserves a slot in the delayed
 * (next-window) region and marks k in the next-window bitmap.
 *
 * Fix: this previously set q->q_bitmap (the current-window bitmap),
 * which is inconsistent with the non-atomic delayed variant
 * pushArrayStackDelayedWithBitmap — a delayed push must flag the
 * element in q_bitmap_next so it is visible after the window slides. */
void pushArrayStackDelayedWithBitmapAtomic (struct ArrayStack *q, uint32_t k)
{
    uint32_t local_q_tail_next = __sync_fetch_and_add(&q->tail_next, 1);
    setBitAtomic(q->q_bitmap_next, k);
    q->Stack[local_q_tail_next] = k;
}
/* Promotes the delayed region: the old tail becomes the new head and
 * everything pushed with the Delayed variants becomes current. */
void slideWindowArrayStack (struct ArrayStack *q)
{
    uint32_t oldTail = q->tail;
    q->head = oldTail;
    q->tail = q->tail_next;
}
/* Slides the window (as slideWindowArrayStack) and rotates the
 * bitmaps: the next-window bitmap becomes current, and the old current
 * bitmap is recycled empty for the new delayed region. */
void slideWindowArrayStackBitmap (struct ArrayStack *q)
{
    uint32_t oldTail = q->tail;
    q->head = oldTail;
    q->tail = q->tail_next;

    swapBitmaps(&q->q_bitmap, &q->q_bitmap_next);
    clearBitmap(q->q_bitmap_next);
}
/* LIFO removal: takes the most recently pushed element of the current
 * window and clears its current-window bitmap flag. */
uint32_t popArrayStack(struct ArrayStack *q)
{
    q->tail--;
    uint32_t k = q->Stack[q->tail];
    clearBit(q->q_bitmap, k);
    return k;
}
/* Peeks at the oldest element of the current window without removal. */
uint32_t frontArrayStack (struct ArrayStack *q)
{
    return q->Stack[q->head];
}
/* Returns 1 when the current window [head, tail) holds no elements. */
uint8_t isEmptyArrayStackCurr (struct ArrayStack *q)
{
    return (q->tail > q->head) ? 0 : 1;
}
/* Returns 1 only when both the current window and the delayed region
 * are empty. */
uint8_t isEmptyArrayStack (struct ArrayStack *q)
{
    if(isEmptyArrayStackCurr(q) && isEmptyArrayStackNext(q))
        return 1;
    return 0;
}
/* Returns 1 when no element lies between head and the delayed cursor
 * tail_next (i.e. current + delayed regions are both empty). */
uint8_t isEmptyArrayStackNext (struct ArrayStack *q)
{
    return (q->tail_next > q->head) ? 0 : 1;
}
/* Membership test: was k pushed into the current window (per its
 * bitmap flag)? */
uint8_t ispushArrayStack (struct ArrayStack *q, uint32_t k)
{
    uint8_t present = getBit(q->q_bitmap, k);
    return present;
}
/* Membership test against the next-window (delayed) bitmap. */
uint8_t ispushArrayStackNext (struct ArrayStack *q, uint32_t k)
{
    uint8_t present = getBit(q->q_bitmap_next, k);
    return present;
}
/* Number of elements in the current window [head, tail). */
uint32_t sizeArrayStackCurr(struct ArrayStack *q)
{
    uint32_t count = q->tail - q->head;
    return count;
}
/* Number of elements in the delayed region [tail, tail_next). */
uint32_t sizeArrayStackNext(struct ArrayStack *q)
{
    uint32_t count = q->tail_next - q->tail;
    return count;
}
/* Total element count across current and delayed regions,
 * i.e. [head, tail_next). */
uint32_t sizeArrayStack(struct ArrayStack *q)
{
    uint32_t count = q->tail_next - q->head;
    return count;
}
/* Drains a per-thread local stack into the shared stack: atomically
 * reserves a contiguous range in shared_q's delayed region, bulk-copies
 * the local elements there, then resets the local stack.
 *
 * NOTE(review): the reservation size is local_q->tail while the copied
 * length is tail - head; these only agree when local_q->head == 0 —
 * confirm callers never flush after sliding the local window. */
void flushArrayStackToShared(struct ArrayStack *local_q, struct ArrayStack *shared_q)
{
    uint32_t shared_q_tail_next = __sync_fetch_and_add(&shared_q->tail_next, local_q->tail);
    uint32_t local_q_size = local_q->tail - local_q->head;
    memcpy(&shared_q->Stack[shared_q_tail_next], &local_q->Stack[local_q->head], local_q_size * (sizeof(uint32_t)));
    local_q->head = 0;
    local_q->tail = 0;
    local_q->tail_next = 0;
}
/* Rebuilds the current-window bitmap from the elements in
 * [head, tail), setting bits atomically in parallel.
 *
 * Fix: the scratch variable `v` was declared at function scope, which
 * makes it *shared* under "#pragma omp parallel for" — a data race
 * where one thread's load could be overwritten by another before use
 * (the sibling arrayStackToBitmap explicitly uses private(v,i)).
 * Declaring `v` inside the loop body makes it private per iteration. */
void arrayStackGenerateBitmap(struct ArrayStack *q)
{
    uint32_t i;

    #pragma omp parallel for
    for(i = q->head ; i < q->tail; i++)
    {
        uint32_t v = q->Stack[i];
        setBitAtomic(q->q_bitmap, v);
    }
}
/* Transfers the current window [head, tail) into an external bitmap b
 * (atomic bit sets, parallelized with v and i private per thread),
 * then consumes the window by advancing head and tail_next to tail. */
void arrayStackToBitmap(struct ArrayStack *q, struct Bitmap *b)
{
    uint32_t v;
    uint32_t i;

    #pragma omp parallel for default(none) shared(q,b) private(v,i)
    for(i = q->head ; i < q->tail; i++)
    {
        v = q->Stack[i];
        setBitAtomic(b, v);
    }

    // b->numSetBits = q->q_bitmap->numSetBits;
    q->head = q->tail;
    q->tail_next = q->tail;
}
/* Converts the set bits of b into stack entries: each thread collects
 * its share of indices into its own localFrontierStacks[t_id], then
 * atomically flushes them into the shared stack q; finally the window
 * slides so the flushed elements become current.
 * (Resulting element order depends on thread scheduling.) */
void bitmapToArrayStack(struct Bitmap *b, struct ArrayStack *q, struct ArrayStack **localFrontierStacks)
{
    #pragma omp parallel default(none) shared(b,localFrontierStacks,q)
    {
        uint32_t i;
        uint32_t t_id = omp_get_thread_num();
        struct ArrayStack *localFrontierStack = localFrontierStacks[t_id];

        #pragma omp for
        for(i = 0 ; i < (b->size); i++)
        {
            if(getBit(b, i))
            {
                localFrontierStack->Stack[localFrontierStack->tail] = i;
                localFrontierStack->tail++;
            }
        }

        flushArrayStackToShared(localFrontierStack, q);
    }

    slideWindowArrayStack(q);
}
|
intersectionFunctor.h | // Copyright 2014 Nicolas Mellado
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// -------------------------------------------------------------------------- //
//
// Authors: Nicolas Mellado
//
// An implementation of the Super 4-points Congruent Sets (Super 4PCS)
// algorithm presented in:
//
// Super 4PCS: Fast Global Pointcloud Registration via Smart Indexing
// Nicolas Mellado, Dror Aiger, Niloy J. Mitra
// Symposium on Geometry Processing 2014.
//
// Data acquisition in large-scale scenes regularly involves accumulating
// information across multiple scans. A common approach is to locally align scan
// pairs using Iterative Closest Point (ICP) algorithm (or its variants), but
// requires static scenes and small motion between scan pairs. This prevents
// accumulating data across multiple scan sessions and/or different acquisition
// modalities (e.g., stereo, depth scans). Alternatively, one can use a global
// registration algorithm allowing scans to be in arbitrary initial poses. The
// state-of-the-art global registration algorithm, 4PCS, however has a quadratic
// time complexity in the number of data points. This vastly limits its
// applicability to acquisition of large environments. We present Super 4PCS for
// global pointcloud registration that is optimal, i.e., runs in linear time (in
// the number of data points) and is also output sensitive in the complexity of
// the alignment problem based on the (unknown) overlap across scan pairs.
// Technically, we map the algorithm as an ‘instance problem’ and solve it
// efficiently using a smart indexing data organization. The algorithm is
// simple, memory-efficient, and fast. We demonstrate that Super 4PCS results in
// significant speedup over alternative approaches and allows unstructured
// efficient acquisition of scenes at scales previously not possible. Complete
// source code and datasets are available for research use at
// http://geometry.cs.ucl.ac.uk/projects/2014/super4PCS/.
#pragma once
#include "gr/accelerators/pairExtraction/intersectionNode.h"
#include <list>
#include <iostream>
namespace gr{
/*!
 * Rounds an accuracy value down to a conservative power of two.
 *
 * The deepest subdivision level implied by \p epsilon is
 * floor(-log2(epsilon)); the returned value is 2^(-level).
 *
 * \param epsilon requested accuracy in (0:1]
 * \param lvl     optional out-parameter receiving the computed level
 * \return the snapped accuracy 2^(-level)
 */
template <typename Scalar>
static Scalar GetRoundedEpsilonValue(Scalar epsilon, int* lvl = nullptr) {
    const int maxLevel = -std::log2(epsilon); //!< Maximum level
    if (lvl != nullptr)
        *lvl = maxLevel;
    // Refine epsilon by the closest conservative value
    return Scalar(double(1) / double(std::pow(2, maxLevel)));
}
//! \brief Extract pairs of points by rasterizing primitives and collect points
/*!
* Acceleration technique used in Super4PCS
* \todo Use Traits to allow custom parameters but similar API between variants
* \see BruteForceFunctor
*/
// Pair-extraction strategy: rasterizes the primitives over a hierarchical
// subdivision of the unit cube and collects the points they intersect.
template <class _Primitive, class _Point, int _dim, typename _Scalar>
struct IntersectionFunctor{
    typedef _Point Point;
    typedef _Primitive Primitive;
    typedef _Scalar Scalar;
    enum { dim = _dim };  // ambient dimension of the subdivision

    /// Intersects every primitive of \p M with the point set \p Q and
    /// reports each (primitive, point) hit to \p functor.
    template <class PrimitiveContainer,
              class PointContainer,
              class ProcessingFunctor> //!< Process the extracted pairs
    void
    process(
        const PrimitiveContainer& M,   //!< Input primitives to intersect with Q
        const PointContainer    & Q,   //!< Normalized innput point set \in [0:1]^d
        Scalar &epsilon,               //!< Intersection accuracy, refined
        unsigned int minNodeSize,      //!< Min number of points in nodes
        ProcessingFunctor& functor
        );
};
/*!
\return Pairs< PointId, PrimitiveId>
*/
template <class Primitive, class Point, int dim, typename Scalar>
template <class PrimitiveContainer,
class PointContainer,
class ProcessingFunctor>
void
IntersectionFunctor<Primitive, Point, dim, Scalar>::process(
const PrimitiveContainer& M, //!< Input primitives to intersect with Q
const PointContainer & Q, //!< Normalized innput point set \in [0:1]^d
Scalar &epsilon, //!< Intersection accuracy in [0:1]
unsigned int minNodeSize, //!< Min number of points in nodes
ProcessingFunctor& functor
)
{
using std::pow;
// types definitions
typedef NdNode<Point, dim, Scalar, PointContainer> Node;
typedef typename std::vector<Node> NodeContainer;
typedef typename std::pair<unsigned int, unsigned int> ResPair;
typedef typename std::vector<ResPair> ResContainer;
// Global variables
const unsigned int nbPoint = Q.size(); //!< Number of points
int lvlMax = 0;
epsilon = GetRoundedEpsilonValue(epsilon, &lvlMax);
int clvl = 0; //!< Current level
// Use local array and manipulate references to avoid array copies
NodeContainer ping, pong;
NodeContainer* nodes = &ping; //!< Nodes of the current level
NodeContainer* childNodes = &pong; //!< Child nodes for the next level
//! Nodes too small for split
std::vector< std::pair<Node, Scalar> > earlyNodes;
//
// // Fill the idContainer with identity values
if (functor.ids.size() != nbPoint){
std::cout << "[IntersectionFunctor] Init id array" << std::endl;
functor.ids.clear();
for(unsigned int i = 0; i < nbPoint; i++)
functor.ids.push_back(i);
}
// Buid root node in the child node, will be copied to the current nodes
childNodes->push_back(Node::buildUnitRootNode(Q, functor.ids));
Scalar edgeLength { 0 };
Scalar edgeHalfLength { 0 };
// First Loop
while (clvl != lvlMax-1){
// Stop if we not have any nodes to checks
if (childNodes->empty())
break;
edgeLength = Scalar(1)/pow(2, clvl);
edgeHalfLength = edgeLength/Scalar(2);
// swap pointers
std::swap(nodes, childNodes);
childNodes->clear();
//#pragma omp parallel
for(typename NodeContainer::iterator nit = nodes->begin();
nit != nodes->end(); nit++){
Node &n = *nit;
// Check if the current node intersect one of the primitives
// In this case, subdivide, store new nodes and stop the loop
for(typename PrimitiveContainer::const_iterator pit = M.begin();
pit != M.end(); pit++){
if ((*pit).intersect(n.center(), edgeHalfLength+epsilon)){
// There is two options now: either there is already few points in the
// current node, in that case we stop splitting it, or we split.
if (n.rangeLength() > int(minNodeSize)){
//#pragma omp critical
n.split(*childNodes, edgeHalfLength);
}else{
//#pragma omp critical
earlyNodes.emplace_back(n, edgeHalfLength+epsilon);
}
break;
}
}
}
clvl++;
}
// Second Loop
ResContainer results;
results.reserve(childNodes->size());
unsigned int pId = 0;
for(typename PrimitiveContainer::const_iterator itP = M.begin();
itP != M.end(); itP++, pId++){
// add children
for(typename NodeContainer::const_iterator itN = childNodes->begin();
itN != childNodes->end(); itN++){
if ((*itP).intersect((*itN).center(), epsilon*Scalar(2))){
functor.beginPrimitiveCollect(pId);
for(unsigned int j = 0; j!= (unsigned int)((*itN).rangeLength()); j++){
if(pId>(*itN).idInRange(j))
if((*itP).intersectPoint((*itN).pointInRange(j),epsilon))
functor.process(pId, (*itN).idInRange(j));
}
functor.endPrimitiveCollect(pId);
}
}
// add other leaves
for(typename std::vector< std::pair<Node, Scalar> >::const_iterator itPairs =
earlyNodes.begin();
itPairs != earlyNodes.end();
itPairs++){
if((*itP).intersect((*itPairs).first.center(), (*itPairs).second)){
// Notice the functor we are collecting points for the current primitive
functor.beginPrimitiveCollect(pId);
for(unsigned int j = 0; j!= (unsigned int)((*itPairs).first.rangeLength()); j++){
if(pId>(*itPairs).first.idInRange(j))
if((*itP).intersectPoint((*itPairs).first.pointInRange(j),epsilon))
functor.process(pId, (*itPairs).first.idInRange(j));
}
functor.endPrimitiveCollect(pId);
}
}
}
}
} // namespace gr
|
rose_shared.c | /*
* dependence graph:
*/
#include "omp.h"
/* Demo kernel for dependence analysis: bump every element of a[] by one.
 * Iterations are independent, so the loop parallelizes trivially; the
 * loop index is declared in the for-statement and is therefore private
 * to each thread without an explicit clause.
 * NOTE(review): a[] is never initialized before being read — this is
 * analysis demo code, not a computation with a meaningful result. */
void foo()
{
  int x;
  int a[100];
#pragma omp parallel for
  for (int k = 0; k < 100; ++k) {
    a[k] += 1;
  }
}
/*
non loop carried anti dependence for array accesses : level =1 > 0
dep SgExprStatement:a[i] =((a[i]) + 1); SgExprStatement:a[i] =((a[i]) + 1); 1*1 ANTI_DEP; commonlevel = 1 CarryLevel = 1 Is precise SgPntrArrRefExp:(a[i])@10:11->SgPntrArrRefExp:a[i]@10:9 == 0;||::
*/
|
convolution_1x1_pack8to4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// 1x1 stride-1 convolution, pack8 -> pack4: a 1x1 kernel makes convolution a
// plain matrix multiply, so flatten the spatial dims into a single row and
// hand the blob to the im2col/sgemm kernel.
static void conv1x1s1_sgemm_pack8to4_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    const int spatial = bottom_blob.w * bottom_blob.h;

    // Shallow reshape: same storage, viewed as (spatial x 1) per channel.
    Mat bottom_im2col = bottom_blob;
    bottom_im2col.w = spatial;
    bottom_im2col.h = 1;

    im2col_sgemm_pack8to4_avx(bottom_im2col, top_blob, kernel, _bias, opt);
}
// 1x1 stride-2 convolution, pack8 -> pack4: decimate the input by taking
// every second pixel in x and y into a scratch blob, then run the stride-1
// sgemm path on the shrunken input.
static void conv1x1s2_sgemm_pack8to4_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;
    int outw = top_blob.w;
    int outh = top_blob.h;
    // After a row of outputs, r0 sits at x = 2*outw floats*8 into the input
    // row; skip the remainder of that row plus one full row (vertical stride
    // 2). Counts are in floats: 8 floats per pack8 pixel.
    const int tailstep = (w - 2 * outw + w) * 8;
    Mat bottom_blob_shrinked;
    bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);
    // NOTE(review): create() allocation failure is not checked here — confirm
    // against the surrounding ncnn convention (callers usually validate blobs).
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < channels; p++)
    {
        const float* r0 = bottom_blob.channel(p);
        float* outptr = bottom_blob_shrinked.channel(p);
        for (int i = 0; i < outh; i++)
        {
            int j = 0;
            for (; j < outw; j++)
            {
                // Copy one pack8 pixel (8 floats); advancing r0 by 16 floats
                // skips the next input pixel (horizontal stride 2).
                __m256 _v = _mm256_load_ps(r0);
                _mm256_store_ps(outptr, _v);
                r0 += 16;
                outptr += 8;
            }
            r0 += tailstep;
        }
    }
    conv1x1s1_sgemm_pack8to4_avx(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
|
GB_unop__identity_fc32_int32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_fc32_int32
// op(A') function: GB_unop_tran__identity_fc32_int32
// C type: GxB_FC32_t
// A type: int32_t
// cast: GxB_FC32_t cij = GxB_CMPLXF ((float) (aij), 0)
// unaryop: cij = aij
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FC32 || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity operator element-wise, casting int32 -> single-precision
// complex (imaginary part 0): Cx [p] = GxB_CMPLXF ((float) Ax [p], 0).
// Auto-generated code (see file header): do not hand-edit the logic.
GrB_Info GB_unop_apply__identity_fc32_int32
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const int32_t *Ax,
    int64_t anz,                // number of entries to process
    int nthreads                // OpenMP thread count for the static schedule
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // Each iteration touches only index p, so the loop is embarrassingly
    // parallel (aliasing of Cx/Ax is per the signature comment above).
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int32_t aij = Ax [p] ;
        GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
        Cx [p] = z ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, casting each int32 entry to GxB_FC32_t.
// The actual loop lives in the shared template GB_unop_transpose.c, which
// expands using the GB_* macros defined above in this file.
GrB_Info GB_unop_tran__identity_fc32_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,    // per-slice row counts (phase 1 output)
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice, // slice boundaries for parallelism
    int naslice                         // number of slices
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Phase 2 of the two-phase transpose template: fill C using Rowcounts.
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
omp_ex.c | #include<stdio.h>
#include<omp.h>
/* OpenMP demo: 10 threads each report their id and a firstprivate copy of
 * `teste`; the master thread additionally prints a marker. Declaring `id`
 * inside the parallel region makes it thread-private without a clause. */
int main(){
    int teste = 100;
    #pragma omp parallel num_threads(10) firstprivate(teste)
    {
        int id = omp_get_thread_num();
        printf("Thread numero %d = %d\n", id, teste);
        #pragma omp master
        printf("Master\n");
    }
    return 0;
}
|
model.h | #pragma once
#include <util/common/geom/point.h>
#include <util/common/math/vec.h>
#include <util/common/plot/plot.h>
#include <util/common/math/fuzzy.h>
#include <vector>
#include <map>
#include <array>
#include <omp.h>
namespace model
{
/*****************************************************/
/* params */
/*****************************************************/
namespace consts
{
    // Boltzmann constant in eV/K; converts the coupling J into temperatures.
    static const double k = 8.617e-5; /* eV / K */
};
struct parameters
{
    // system params
    size_t n;   // linear lattice size; board stores (n+2)x(n+2) with a periodic halo

    // other params
    double J /* eV */;   // spin coupling; sign selects the ordering branch in board::next

    // Critical (Curie) temperature of the 2D Ising model: Tc = 2.269 |J| / k.
    double Tc() const { return 2.269 * std::abs(J) / consts::k; }
};
// Default model setup: a 10x10 lattice with unit coupling (J = 1 eV).
inline parameters make_default_parameters()
{
    parameters p;
    p.n = 10;   // system size
    p.J = 1;    // coupling
    return p;
}
/*****************************************************/
/* data */
/*****************************************************/
// 2D spin lattice evolved by single-spin-flip Monte Carlo (Metropolis-style
// acceptance via the precomputed Boltzmann factors in avgparams::w).
// data is (n+2)x(n+2): indices 1..n are the lattice, rows/cols 0 and n+1 are
// a periodic halo maintained by ensure_periodic.
class board
{
public:

    // Instantaneous / averaged observables exposed to the UI.
    struct macroparams
    {
        double m, e, c, hi;   // magnetization, energy, heat capacity, susceptibility
    };

    // Accumulators for one temperature run (between begin() and end()).
    struct avgparams
    {
        double ea, ma, e2a, m2a;   // running sums of e, m, e^2, m^2
        size_t n;                  // number of accumulated sweeps
        const parameters * p;      // model parameters for this run
        double T;                  // temperature of this run
        double w[2];               // acceptance factors exp(-dE/kT) for dE = 8|J|, 4|J|
    };

public:

    std::vector < std::vector < bool > > data;   // spin grid; true = +1, false = -1
    macroparams params;

private:

    avgparams aparams;

public:

    // Randomize the grid (including the halo cells, which are then
    // overwritten), restore periodicity, and reset the accumulators.
    void init(const parameters & p)
    {
        data.clear();
        data.resize(p.n + 2);
        for (size_t i = 0; i < p.n + 2; ++i)
        {
            data[i].resize(p.n + 2);
            // NOTE(review): int j vs size_t p.n + 2 — signed/unsigned compare.
            for (int j = 0; j < p.n + 2; ++j)
                data[i][j] = (rand() & 1) == 1;
        }
        ensure_periodic(p);
        aparams = { 0, 0, 0, 0, 0, nullptr, 0 };
    }

    // Start a run at temperature T: reset accumulators and precompute the
    // two possible uphill acceptance probabilities.
    void begin(const parameters & p, double T)
    {
        aparams = { 0, 0, 0, 0, 0, &p, T };
        aparams.w[0] = std::exp(- 8 * std::abs(p.J) / consts::k / T);
        aparams.w[1] = std::exp(- 4 * std::abs(p.J) / consts::k / T);
    }

    // One Monte Carlo sweep followed by measurement of e and m.
    void next()
    {
        int s = 0;
        const size_t n = aparams.p->n;
        // Small lattices are not worth the parallel bookkeeping.
        // NOTE(review): (n - 2) is size_t — underflows for n < 2.
        if ((n - 2) < 4 * omp_get_max_threads())
            next_linear(n);
        else
            next_parallel(n);
        if (aparams.p->J > 0)
        {
            // Ferromagnetic: magnetization from the plus/minus population.
            size_t np = 0, nm = 0;
            for (size_t i = 1; i < n + 1; ++i)
            #pragma omp parallel for reduction(+:np,nm,s) firstprivate(i)
            for (int j = 1; j < n + 1; ++j)
            {
                np += data[i][j] ? 1 : 0;
                nm += data[i][j] ? 0 : 1;
                // Energy sum: each bond counted once via left/up neighbors.
                s += spin_at(i, j) * (spin_at(i - 1, j) + spin_at(i, j - 1));
            }
            // NOTE(review): np+nm scales as n^2 but this divides by n^4 —
            // confirm the intended normalization.
            params.m = std::abs((double)np - (double)nm) / n / n / n / n;
        }
        else
        {
            // Antiferromagnetic: staggered magnetization from the two
            // checkerboard sublattices.
            size_t n1p = 0, n1m = 0, n2p = 0, n2m = 0;
            for (size_t i = 1; i < n + 1; ++i)
            {
                #pragma omp parallel for reduction(+:n1p,n1m,n2p,n2m,s) firstprivate(i)
                for (int j = 1 + (i & 1); j < n + 1; j += 2)
                {
                    n1p += data[i][j] ? 1 : 0;
                    n1m += data[i][j] ? 0 : 1;
                    n2p += data[i][j + 1] ? 1 : 0;
                    n2m += data[i][j + 1] ? 0 : 1;
                    s += spin_at(i, j) * (spin_at(i - 1, j) + spin_at(i, j - 1));
                }
                #pragma omp parallel for reduction(+:n1p,n1m,n2p,n2m,s) firstprivate(i)
                for (int j = 1 + (1 - (i & 1)); j < n + 1; j += 2)
                {
                    n1p += data[i][j + 1] ? 1 : 0;
                    n1m += data[i][j + 1] ? 0 : 1;
                    n2p += data[i][j] ? 1 : 0;
                    n2m += data[i][j] ? 0 : 1;
                    s += spin_at(i, j) * (spin_at(i - 1, j) + spin_at(i, j - 1));
                }
            }
            params.m = std::abs(((double)n1p - (double)n1m) - ((double)n2p - (double)n2m)) / n / n / n / n;
        }
        params.e = std::abs(aparams.p->J * s / n / n);
        aparams.ea += params.e;
        aparams.ma += params.m;
        aparams.e2a += params.e * params.e;
        aparams.m2a += params.m * params.m;
        ++aparams.n;
    }

    // Finish the run: turn sums into averages and derive the fluctuation
    // observables c (heat capacity) and hi (susceptibility).
    void end()
    {
        const size_t n = aparams.p->n;
        aparams.e2a /= aparams.n;
        aparams.ea /= aparams.n;
        aparams.m2a /= aparams.n;
        aparams.ma /= aparams.n;
        params.e = aparams.ea;
        params.m = aparams.ma;
        params.c = n * n / (consts::k * aparams.T) / (consts::k * aparams.T) * std::abs(aparams.e2a - aparams.ea * aparams.ea);
        params.hi = n * n / (consts::k * aparams.T) * std::abs(aparams.m2a - aparams.ma * aparams.ma);
    }

private:

    // Map the stored bool to a +-1 spin value.
    int spin_at(size_t i, size_t j) const { return data[i][j] ? 1 : -1; }

    // Rewrite the whole halo from the opposite lattice edges.
    void ensure_periodic(const parameters & p)
    {
        // NOTE(review): int i vs size_t p.n + 2 — signed/unsigned compare.
        #pragma omp parallel for
        for (int i = 0; i < p.n + 2; ++i)
        {
            data[i][0] = data[i][p.n];
            data[i][p.n + 1] = data[i][1];
            data[0][i] = data[p.n][i];
            data[p.n + 1][i] = data[1][i];
        }
    }

    // Refresh only the halo cells that mirror row i / column j.
    void ensure_periodic(const parameters & p, size_t i, size_t j)
    {
        data[p.n + 1][j] = data[1][j];
        data[i][p.n + 1] = data[i][1];
        data[0][j] = data[p.n][j];
        data[i][0] = data[i][p.n];
    }

    // Energy change index for flipping spin (i,j): the expression maps the
    // neighbor sum to 0..4; values < 2 index into aparams.w, values >= 2
    // mean the flip is accepted unconditionally.
    size_t dE(size_t i, size_t j) const
    {
        int s = spin_at(i, j - 1) + spin_at(i, j + 1) +
                spin_at(i - 1, j) + spin_at(i + 1, j);
        s *= aparams.p->J < 0 ? -1 : 1;
        return - spin_at(i, j) * s / 2 + 2;
    }

    // Sequential sweep: n*n random single-spin Metropolis updates.
    void next_linear(size_t n)
    {
        for (size_t l = 0; l < n * n; ++l)
        {
            size_t i = (rand() % n) + 1;
            size_t j = (rand() % n) + 1;
            size_t c = dE(i, j);
            if (c >= 2) data[i][j] = !data[i][j];
            else
            {
                // Accept uphill move with probability w[c].
                if (rand() < aparams.w[c] * RAND_MAX)
                    data[i][j] = !data[i][j];
            }
            ensure_periodic(*aparams.p, i, j);
        }
    }

    // Parallel sweep: each thread owns a horizontal band of rows; updates
    // that touch a band/lattice boundary are serialized with a critical
    // section so neighbor reads and the halo stay consistent.
    // NOTE(review): rand() is called concurrently here — it is not required
    // to be thread-safe; confirm this is acceptable for this simulation.
    void next_parallel(size_t n)
    {
        std::vector < std::array < size_t, 3 > > ranges(omp_get_max_threads());
        size_t d = (size_t) std::ceil((double) n / omp_get_max_threads());
        for (size_t i = 0; (i < omp_get_max_threads()) && (n > i * d); ++i)
        {
            ranges[i][0] = i * d;                 // band start row (0-based)
            ranges[i][1] = min((i + 1) * d, n);   // band end row (exclusive)
            ranges[i][2] = min(d, n - i * d) * n; // number of updates in the band
        }
        #pragma omp parallel firstprivate(ranges)
        {
            auto tid = omp_get_thread_num();
            auto b = std::get<0>(ranges[tid]);
            auto e = std::get<1>(ranges[tid]);
            for (size_t l = 0; l < std::get<2>(ranges[tid]); ++l)
            {
                size_t i = b + rand() % (e - b) + 1;
                size_t j = (rand() % n) + 1;
                if (i == 1 || i == e || j == 1 || j == n)
                {
                    // Boundary cell: whole update under the lock.
                    #pragma omp critical
                    {
                        size_t c = dE(i, j);
                        if (c >= 2) data[i][j] = !data[i][j];
                        else
                        {
                            if (rand() < aparams.w[c] * RAND_MAX)
                                data[i][j] = !data[i][j];
                        }
                        ensure_periodic(*aparams.p, i, j);
                    }
                }
                else
                {
                    // Interior cell: update lock-free, only halo refresh
                    // (if needed) is serialized.
                    size_t c = dE(i, j);
                    if (c >= 2) data[i][j] = !data[i][j];
                    else
                    {
                        if (rand() < aparams.w[c] * RAND_MAX)
                            data[i][j] = !data[i][j];
                    }
                    if (i == n || i == 1 || j == n || j == 1)
                    {
                        #pragma omp critical
                        ensure_periodic(*aparams.p, i, j);
                    }
                }
            }
        }
    }
};
/*****************************************************/
/* drawing */
/*****************************************************/
using points_t = std::vector < geom::point2d_t > ;
// One plotted curve: the point storage plus the drawable / world / viewport
// objects that render it.
struct plot_data
{
    util::ptr_t < points_t > data;                          // the points being plotted
    plot::list_drawable < points_t > :: ptr_t plot;         // drawable bound to `data`
    plot::world_t::ptr_t world;                             // world coordinate frame
    plot::auto_viewport < points_t > :: ptr_t autoworld;    // viewport auto-fitted to `data`
};
// Everything the UI needs for one simulation: parameters, the four
// observable curves (e, m, c, hi) and the lattice itself.
struct model_data
{
    util::ptr_t < parameters > params;
    plot_data e_data;     // energy vs T
    plot_data m_data;     // magnetization vs T
    plot_data c_data;     // heat capacity vs T
    plot_data hi_data;    // susceptibility vs T
    board system_data;    // the spin lattice
};
// Assemble a plot_data bundle: point storage, world, auto-fitting viewport,
// and a list drawable that renders the points with the given pen and format.
inline static plot_data make_plot_data
(
    plot::palette::pen_ptr pen = plot::palette::pen(0xffffff),
    plot::list_data_format data_format = plot::list_data_format::chain
)
{
    plot_data result;
    result.data = util::create < points_t > ();
    result.world = plot::world_t::create();
    result.autoworld = plot::min_max_auto_viewport < points_t > :: create();
    result.plot = plot::list_drawable < points_t > :: create
    (
        plot::make_data_source(result.data),
        nullptr, // points themselves are not painted, only the line
        pen
    );
    result.plot->data_format = data_format;
    return result;
}
// Wrap the supplied layers in the standard chrome: 5 ticks per axis with a
// 6/8 formatter, a gray grid on a light background, and a viewport mapped to
// the auto-adjusted world of `p`.
inline static plot::drawable::ptr_t make_root_drawable
(
    const plot_data & p,
    std::vector < plot::drawable::ptr_t > layers
)
{
    using namespace plot;

    auto x_ticks = const_n_tick_factory<axe::x>::create(
        make_simple_tick_formatter(6, 8),
        0,
        5
    );
    auto y_ticks = const_n_tick_factory<axe::y>::create(
        make_simple_tick_formatter(6, 8),
        0,
        5
    );
    auto decorated = tick_drawable::create(
        layer_drawable::create(layers),
        x_ticks,
        y_ticks,
        palette::pen(RGB(80, 80, 80)),
        RGB(200, 200, 200)
    );
    return viewporter::create(
        decorated,
        make_viewport_mapper(make_world_mapper < points_t > (p.autoworld))
    );
}
// Render the lattice (halo rows/cols excluded) into a 32-bit GDI bitmap at
// 5x5 pixels per spin: green for +, red for -. Returns an empty pointer when
// the board has not been initialized.
inline std::unique_ptr < CBitmap > export_system(const board & b, bool borders)
{
    if (b.data.empty()) return {};
    CDC dc; dc.CreateCompatibleDC(nullptr);
    size_t cw = 5, ch = 5;   // cell size in pixels
    auto bmp = std::make_unique < CBitmap > ();
    bmp->CreateBitmap(cw * (b.data.size() - 2), ch * (b.data.size() - 2), 1, 32, NULL);
    bmp->SetBitmapDimension(cw * (b.data.size() - 2), ch * (b.data.size() - 2));
    dc.SelectObject(bmp.get());
    auto nbrush = plot::palette::brush(RGB(150,0,0));   // spin down
    auto pbrush = plot::palette::brush(RGB(0,150,0));   // spin up
    CRect r;
    // i runs along x, j along y; +1 skips the periodic halo.
    for (size_t i = 0; i < b.data.size() - 2; ++i)
    for (size_t j = 0; j < b.data.size() - 2; ++j)
    {
        bool v = b.data[i + 1][j + 1];
        r.left = cw * i; r.right = cw * (i + 1);
        r.top = ch * j; r.bottom = ch * (j + 1);
        if (borders)
        {
            // Rectangle() strokes with the current pen, giving cell outlines.
            dc.SelectObject(v ? pbrush.get() : nbrush.get());
            dc.Rectangle(cw * i,
                         ch * j,
                         cw * (i + 1),
                         ch * (j + 1));
        }
        else
        {
            dc.FillRect(&r, v ? pbrush.get() : nbrush.get());
        }
    }
    // Deselect the bitmap before handing it to the caller — GDI requires the
    // bitmap not to be selected into a DC while used elsewhere.
    dc.SelectObject((CBitmap *) nullptr);
    return bmp;
}
// Drawable that paints the lattice into the current viewport. Cells are drawn
// directly when they are at least 3x3 pixels; otherwise the board is rendered
// to a bitmap via export_system and stretched to fit.
// NOTE(review): the lambda captures `b` by reference — the returned drawable
// must not outlive the board; confirm the caller guarantees this.
inline plot::drawable::ptr_t make_system_plot(const board & b)
{
    return plot::custom_drawable::create([&b] (CDC & dc, const plot::viewport & vp)
    {
        if (b.data.empty()) return;
        size_t cw = vp.screen.width() / (b.data.size() - 2);
        size_t ch = vp.screen.height() / (b.data.size() - 2);
        auto nbrush = plot::palette::brush(RGB(150,0,0));   // spin down
        auto pbrush = plot::palette::brush(RGB(0,150,0));   // spin up
        if (cw > 2 && ch > 2)
        {
            CRect r;
            // +1 offsets skip the periodic halo of the board.
            for (size_t i = 0; i < b.data.size() - 2; ++i)
            for (size_t j = 0; j < b.data.size() - 2; ++j)
            {
                bool v = b.data[i + 1][j + 1];
                dc.SelectObject(v ? pbrush.get() : nbrush.get());
                dc.Rectangle(vp.screen.xmin + cw * i,
                             vp.screen.ymin + ch * j,
                             vp.screen.xmin + cw * (i + 1),
                             vp.screen.ymin + ch * (j + 1));
            }
        }
        else
        {
            // Too small for per-cell drawing: blit a prerendered bitmap.
            auto bmp = export_system(b, false);
            CDC memDC; memDC.CreateCompatibleDC(&dc);
            memDC.SelectObject(bmp.get());
            dc.StretchBlt(vp.screen.xmin, vp.screen.ymin, vp.screen.width(), vp.screen.height(), &memDC, 0, 0, bmp->GetBitmapDimension().cx, bmp->GetBitmapDimension().cy, SRCCOPY);
            memDC.SelectObject((CBitmap *) nullptr);
        }
    });
}
// Build the full UI bundle for one simulation; all four observable curves
// share the same style (blue pen, width 2).
inline model_data make_model_data(const parameters & p = make_default_parameters())
{
    const auto blue_pen = [] { return plot::palette::pen(0x0000ff, 2); };

    model_data md;
    md.params = util::create < parameters > (p);
    md.e_data = make_plot_data(blue_pen());
    md.m_data = make_plot_data(blue_pen());
    md.c_data = make_plot_data(blue_pen());
    md.hi_data = make_plot_data(blue_pen());
    return md;
}
} |
templatemath.h | /*
* templatemath.h
*
* Created on: Jan 1, 2016
* Author: agibsonccc
*/
#ifndef TEMPLATEMATH_H_
#define TEMPLATEMATH_H_
#include <math.h>
#include <cmath>
#include <dll.h>
#include <pointercast.h>
#define HALF_MAX_VALUE 65504.
#define FLOAT_MAX_VALUE 3.4028235E38
#define DOUBLE_MAX_VALUE 1.7976931348623157E308
#define FLOAT_MIN_NORMAL 1.17549435e-38
#ifdef __CUDACC__
#include <types/float16.h>
#define math_def __host__ __device__
#ifdef CUDA_9
struct HALFS{
half H;
half L;
__host__ __device__
HALFS() {};
__host__ __device__
~HALFS() {};
};
union PAIR {
HALFS B;
int W;
__host__ __device__
PAIR() {};
__host__ __device__
~PAIR(){}
};
#else
typedef union {
struct {
half H;
half L;
} B;
int W;
} PAIR;
#endif // cuda_9
#else
#define math_def
#include <types/float16.h>
#endif
namespace nd4j {
#ifdef __CUDACC__
#endif
namespace math {
template<typename T>
math_def inline T nd4j_abs(T value);
template<typename T>
math_def inline void nd4j_swap(T &val1, T &val2);
template<typename T>
math_def inline T nd4j_max(T val1, T val2);
template<typename T>
math_def inline T nd4j_min(T val1, T val2);
template<typename T>
math_def inline T nd4j_rint(T val1);
template<typename T>
math_def inline T nd4j_copysign(T val1, T val2);
//#ifndef __CUDACC__
template<typename T>
math_def inline T nd4j_dot(T *x, T *y, int length);
//#endif
template<typename T>
math_def inline T nd4j_ceil(T val1);
template<typename T>
math_def inline bool nd4j_isnan(T val1);
template<typename T>
math_def inline bool nd4j_isinf(T val1);
template<typename T>
math_def inline bool nd4j_isfin(T val1);
template<typename T>
math_def inline T nd4j_cos(T val);
template<typename T>
math_def inline T nd4j_cosh(T val);
template<typename T>
math_def inline T nd4j_exp(T val);
template<typename T>
math_def inline T nd4j_floor(T val);
template<typename T>
math_def inline T nd4j_log(T val);
template<typename T>
math_def inline T nd4j_pow(T val, T val2);
template<typename T>
math_def inline T nd4j_round(T val);
template<typename T>
math_def inline T nd4j_remainder(T num, T denom);
template<typename T>
math_def inline T nd4j_fmod(T num, T denom);
template<typename T>
math_def inline T nd4j_erf(T num);
template<typename T>
math_def inline T nd4j_erfc(T num);
// Activation functions built from the math primitives declared above.

// sigmoid(x) = 1 / (1 + e^-x)
template<typename T>
math_def inline T nd4j_sigmoid(T val) {
    return (T) 1.0 / ((T) 1.0 + nd4j_exp<T>(-val));
}

// ELU: identity for x >= 0, e^x - 1 otherwise.
template<typename T>
math_def inline T nd4j_elu(T val) {
    if (val >= (T) 0.0) return val;
    else return nd4j_exp<T>(val) - (T) 1.0;
    //return val >= 0.0 ? val : (nd4j_exp<T>(val) - 1.0);
}

// Leaky ReLU with configurable negative slope alpha.
template<typename T>
math_def inline T nd4j_leakyrelu(T val,T alpha) {
    if (val < (T) 0.0f) return alpha * val;
    else return val;
    //return val < 0 ? alpha * val : val;
}

// d/dx ELU: 1 for x >= 0, e^x otherwise.
template<typename T>
math_def inline T nd4j_eluderivative(T val) {
    if (val >= (T) 0.0f) return (T) 1.0f;
    else return nd4j_exp<T>(val);
    //return val >= 0.0 ? 1.0 : nd4j_exp(val);
}

template<typename T>
math_def inline T nd4j_sin(T val);

template<typename T>
math_def inline T nd4j_sinh(T val);

// softplus(x) = ln(1 + e^x)
template<typename T>
math_def inline T softplus(T val) {
    return nd4j_log<T>((T) 1.0f + nd4j_exp<T>(val));
}

// softsign(x) = x / (1 + |x|)
template<typename T>
math_def inline T nd4j_softsign(T val) {
    return val / ((T) 1.0f + nd4j::math::nd4j_abs<T>(val));
}
template<typename T>
math_def inline T nd4j_sqrt(T val);

template<typename T>
math_def inline T nd4j_tanh(T val);

template<typename T>
math_def inline T nd4j_tan(T val);

template<typename T>
math_def inline T nd4j_atan2(T val1, T val2);

// atan2 specializations: each forwards to the matching C math function;
// float16 computes in single precision and narrows the result.
template<>
math_def inline float16 nd4j_atan2<float16>(float16 value1, float16 value2) {
    return (float16) atan2f((float) value1, (float) value2);
}

template<>
math_def inline float nd4j_atan2<float>(float value1, float value2) {
    return atan2f(value1, value2);
}

template<>
math_def inline double nd4j_atan2<double>(double value1, double value2) {
    return atan2(value1, value2);
}
// Generic fallback for tan.
// BUG FIX: the previous body returned nd4j_log((val + 1 / (1 - val)) * 0.5),
// which computes neither tan nor the atanh identity it resembles
// (atanh(x) = 0.5 * ln((1 + x) / (1 - x))). The tangent is derived from the
// sine/cosine primitives instead; the explicit specializations below still
// take precedence for float16/float/double/int.
template<typename T>
math_def inline T nd4j_tan(T val) {
    return nd4j_sin<T>(val) / nd4j_cos<T>(val);
}
// d/dx tanh(x) = 1 - tanh(x)^2
template<typename T>
math_def inline T nd4j_tanhderivative(T val) {
    T tanh = nd4j_tanh(val);
    return (T) 1.0f - tanh * tanh;
}

// d/dx sigmoid(x) = sigmoid(x) * (1 - sigmoid(x))
template<typename T>
math_def inline T nd4j_sigmoidderivative(T val) {
    T sigmoid = nd4j_sigmoid(val);
    T out = sigmoid * ((T) 1.0f - sigmoid);
    return out;
}

// d/dx softsign(x) = 1 / (1 + |x|)^2
template<typename T>
math_def inline T nd4j_softsignderivative(T val) {
    T y = (T) 1.0f + nd4j_abs(val);
    return (T) 1.0f / (y * y);
}

// Sign function: -1, 0 or +1.
template<typename T>
math_def inline T nd4j_sgn(T val) {
    return val < (T) 0.0f ? (T) -1.0f : val > (T) 0.0f ? (T) 1.0f : (T) 0.0f;
}

// Aliases of nd4j_sgn kept for API compatibility.
template<typename T>
math_def inline T nd4j_sign(T val) {
    return nd4j_sgn<T>(val);
}

template<typename T>
math_def inline T nd4j_signum(T val) {
    return nd4j_sgn<T>(val);
}
//#ifndef __CUDACC__
// Dot product. The float16 overload stays scalar because SIMD reductions
// cannot be applied to the union-backed half type.
template<>
math_def inline float16 nd4j_dot<float16>(float16 *x, float16 *y, int length) {
    float16 dot = (float16) 0.0f;

    // TODO: since we can't use simd on unions, we might use something else here.
    for(int e = 0; e < length; e++) {
        dot += x[e] * y[e];
    }

    return dot;
}

// Generic dot product, vectorized with an OpenMP SIMD reduction.
template<typename T>
math_def inline T nd4j_dot(T *x, T *y, int length) {
    T dot = (T) 0.0f;

#pragma omp simd reduction(+:dot)
    for(int e = 0; e < length; e++) {
        dot += x[e] * y[e];
    }

    return dot;
}
//#endif

template<typename T>
math_def inline T nd4j_acos(T val);

template<typename T>
math_def inline T nd4j_acosh(T val);

template<typename T>
math_def inline T nd4j_asin(T val);

template<typename T>
math_def inline T nd4j_asinh(T val);

// Generic asinh via the log identity: asinh(x) = ln(sqrt(x^2 + 1) + x).
template<typename T>
math_def inline T nd4j_asinh(T val) {
    //Math.log(Math.sqrt(Math.pow(x, 2) + 1) + x)
    return nd4j_log(nd4j_sqrt(nd4j_pow(val, (T) 2) + (T) 1) + val);
}

template<typename T>
math_def inline T nd4j_atan(T val);

template<typename T>
math_def inline T nd4j_atanh(T val);
// Absolute value specializations. float16 uses the native half negate when
// available; integral types use a plain conditional.
template<>
math_def inline float16 nd4j_abs<float16>(float16 value) {
#ifdef NATIVE_HALFS
    return value < 0. ? __hneg(value.data) : value;
#else
    return (float16) fabsf((float) value);
#endif
}

template<>
math_def inline float nd4j_abs<float>(float value) {
    return fabsf(value);
}

template<>
math_def inline double nd4j_abs<double>(double value) {
    return value < 0 ? -value : value;
}

template<>
math_def inline int nd4j_abs<int>(int value) {
    return value < 0 ? -value : value;
}

template<>
math_def inline Nd4jIndex nd4j_abs<Nd4jIndex>(Nd4jIndex value) {
    return value < 0 ? -value : value;
}
// Round-to-nearest-integer specializations; integral types are already
// integers and pass through unchanged.
template<>
math_def inline float16 nd4j_rint<float16>(float16 value) {
    return (float16) rintf((float) value);
}

template<>
math_def inline float nd4j_rint<float>(float value) {
    return rintf(value);
}

template<>
math_def inline double nd4j_rint<double>(double value) {
    return rint(value);
}

template<>
math_def inline int nd4j_rint<int>(int value) {
    return value;
}

template<>
math_def inline Nd4jIndex nd4j_rint<Nd4jIndex>(Nd4jIndex value) {
    return value;
}
// NaN tests. Integral types can never be NaN.
// NOTE(review): the float16 test matches only the exact bit pattern 0x7fff,
// not the full NaN encoding space of IEEE half — confirm this is intended.
template<>
math_def inline bool nd4j_isnan<float16>(float16 value) {
    return *(value.data.getXP()) == 0x7fffU;
}

template<>
math_def inline bool nd4j_isnan<float>(float value) {
    return value != value;
}

template<>
math_def inline bool nd4j_isnan<double>(double value) {
    return value != value;
}

template<>
math_def inline bool nd4j_isnan<int>(int value) {
    return false;
}

template<>
math_def inline bool nd4j_isnan<Nd4jIndex>(Nd4jIndex value) {
    return false;
}

// Infinity tests. float16 uses a finite-range comparison rather than a bit
// test; float/double defer to isinf (device) or std::isinf (host).
template<>
math_def inline bool nd4j_isinf<float16>(float16 value) {
    return value < (float16) -HALF_MAX_VALUE || value > (float16) HALF_MAX_VALUE;
}

template<>
math_def inline bool nd4j_isinf<float>(float value) {
#ifdef __CUDACC__
    return isinf(value);
#else
    return std::isinf(value);
#endif
    //return value < -FLOAT_MAX_VALUE || value > FLOAT_MAX_VALUE;
}

template<>
math_def inline bool nd4j_isinf<double>(double value) {
#ifdef __CUDACC__
    return isinf(value);
#else
    return std::isinf(value);
#endif
    //return value < -DOUBLE_MAX_VALUE || value > DOUBLE_MAX_VALUE;
}

template<>
math_def inline bool nd4j_isinf<int>(int value) {
    return false;
}

template<>
math_def inline bool nd4j_isinf<Nd4jIndex>(Nd4jIndex value) {
    return false;
}

// Finite iff neither NaN nor infinite.
template<typename T>
math_def inline bool nd4j_isfin(T value) {
    return !nd4j_isnan<T>(value) && !nd4j_isinf<T>(value);
}
// copysign specializations: |val1| with the sign of val2. Integral variants
// are built from nd4j_abs since the C library has no integer copysign.
template<>
math_def inline float16 nd4j_copysign<float16>(float16 val1, float16 val2) {
    return (float16) copysignf((float) val1, (float) val2);
}

template<>
math_def inline float nd4j_copysign<float>(float val1, float val2) {
    return copysignf(val1, val2);
}

template<>
math_def inline double nd4j_copysign<double>(double val1, double val2) {
    return copysign(val1, val2);
}

template<>
math_def inline int nd4j_copysign<int>(int val1, int val2) {
    if (val2 < 0) return -(nd4j_abs<int>(val1));
    else return nd4j_abs<int>(val1);
}

template<>
math_def inline Nd4jIndex nd4j_copysign<Nd4jIndex>(Nd4jIndex val1, Nd4jIndex val2) {
    if (val2 < 0) return -(nd4j_abs<Nd4jIndex>(val1));
    else return nd4j_abs<Nd4jIndex>(val1);
}
// max/min specializations: plain conditional comparisons for every supported
// type (defined explicitly so they work in both host and device code).
template<>
math_def inline float16 nd4j_max<float16>(float16 val1, float16 val2) {
    return val1 > val2 ? val1 : val2;
}

template<>
math_def inline float nd4j_max<float>(float val1, float val2) {
    return val1 > val2 ? val1 : val2;
}

template<>
math_def inline double nd4j_max<double>(double val1, double val2) {
    return val1 > val2 ? val1 : val2;
}

template<>
math_def inline int nd4j_max<int>(int val1, int val2) {
    return val1 > val2 ? val1 : val2;
}

template<>
math_def inline Nd4jIndex nd4j_max<Nd4jIndex>(Nd4jIndex val1, Nd4jIndex val2) {
    return val1 > val2 ? val1 : val2;
}

template<>
math_def inline Nd4jIndex nd4j_min<Nd4jIndex>(Nd4jIndex val1, Nd4jIndex val2) {
    return val1 < val2 ? val1 : val2;
}

template<>
math_def inline float16 nd4j_min<float16>(float16 val1, float16 val2) {
    return val1 < val2 ? val1 : val2;
}

template<>
math_def inline float nd4j_min<float>(float val1, float val2) {
    return val1 < val2 ? val1 : val2;
}

template<>
math_def inline double nd4j_min<double>(double val1, double val2) {
    return val1 < val2 ? val1 : val2;
}

template<>
math_def inline int nd4j_min<int>(int val1, int val2) {
    return val1 < val2 ? val1 : val2;
}
// ceil for float16: native half intrinsic when available, else via float.
template<>
math_def inline float16 nd4j_ceil<float16>(float16 val) {
#ifdef NATIVE_HALFS
    // BUG FIX: the return statement was missing its terminating semicolon,
    // breaking CUDA builds compiled with NATIVE_HALFS defined.
    return hceil(val.data);
#else
    return (float16) ceilf((float) val);
#endif
}
template<>
math_def inline float nd4j_ceil<float>(float val1) {
    return ceilf(val1);
}

template<>
math_def inline double nd4j_ceil<double>(double val) {
    return ceil(val);
}

// int specialization computes in float; the result truncates back to int.
template<>
math_def inline int nd4j_ceil<int>(int val) {
    return ceil((float) val);
}

// Cosine specializations; float16 uses the native half intrinsic if present.
template<>
math_def inline float16 nd4j_cos<float16>(float16 val) {
#ifdef NATIVE_HALFS
    return hcos(val.data);
#else
    return cosf((float) val);
#endif
}

template<>
math_def inline float nd4j_cos<float>(float val) {
    return cosf(val);
}

template<>
math_def inline double nd4j_cos<double>(double val) {
    return cos(val);
}

// int specialization computes in float; the result truncates back to int.
template<>
math_def inline int nd4j_cos<int>(int val) {
    return cosf((float) val);
}
// Hyperbolic cosine specializations (no native half variant; float16 relies
// on the implicit float -> float16 conversion on return).
template<>
math_def inline float16 nd4j_cosh<float16>(float16 val) {
    return coshf((float) val);
}

template<>
math_def inline float nd4j_cosh<float>(float val) {
    return coshf(val);
}

template<>
math_def inline double nd4j_cosh<double>(double val) {
    return cosh(val);
}

// int specialization computes in float; the result truncates back to int.
template<>
math_def inline int nd4j_cosh<int>(int val) {
    return coshf((float) val);
}

// Exponential specializations; float16 uses the native half intrinsic if present.
template<>
math_def inline float16 nd4j_exp<float16>(float16 val) {
#ifdef NATIVE_HALFS
    return hexp(val.data);
#else
    return (float16) expf((float) val);
#endif
}

template<>
math_def inline float nd4j_exp<float>(float val) {
    return expf(val);
}

template<>
math_def inline double nd4j_exp<double>(double val) {
    return exp(val);
}

// int specialization computes in float; the result truncates back to int.
template<>
math_def inline int nd4j_exp<int>(int val) {
    return expf((float) val);
}
// floor specializations; float16 uses the native half intrinsic if present.
template<>
math_def inline float16 nd4j_floor<float16>(float16 val) {
#ifdef NATIVE_HALFS
    return hfloor(val.data);
#else
    return (float16) floorf((float) val);
#endif
}

template<>
math_def inline float nd4j_floor<float>(float val) {
    return floorf(val);
}

template<>
math_def inline double nd4j_floor<double>(double val) {
    return floor(val);
}

// int specialization computes in float; the result truncates back to int.
template<>
math_def inline int nd4j_floor<int>(int val) {
    return floorf((float) val);
}

// Natural logarithm specializations.
template<>
math_def inline float16 nd4j_log<float16>(float16 val) {
#ifdef NATIVE_HALFS
    return hlog(val.data);
#else
    return (float16) logf((float) val);
#endif
}

template<>
math_def inline float nd4j_log<float>(float val) {
    return logf(val);
}

template<>
math_def inline double nd4j_log<double>(double val) {
    return log(val);
}

// NOTE(review): the (int) cast is a no-op on an int argument — the value is
// implicitly converted to float at the logf call either way.
template<>
math_def inline int nd4j_log<int>(int val) {
    return logf((int) val);
}
// Power specializations; float16 and int compute in float.
template<>
math_def inline float16 nd4j_pow<float16>(float16 val, float16 val2) {
    return (float16) powf((float) val, (float) val2);
}

template<>
math_def inline float nd4j_pow<float>(float val, float val2) {
    return powf(val, val2);
}

template<>
math_def inline double nd4j_pow<double>(double val, double val2) {
    return pow(val, val2);
}

// int specialization computes in float; the result truncates back to int.
template<>
math_def inline int nd4j_pow<int>(int val, int val2) {
    return powf((float) val, (float) val2);
}

// round-half-away-from-zero specializations (double/int variants appear
// further below in this file).
template<>
math_def inline float16 nd4j_round<float16>(float16 val) {
    return (float16) roundf((float) val);
}

template<>
math_def inline float nd4j_round<float>(float val) {
    return roundf(val);
}

// IEEE remainder (round-to-nearest quotient) specializations.
template<>
math_def inline float nd4j_remainder<float>(float num, float denom) {
    return remainderf(num, denom);
}

template<>
math_def inline double nd4j_remainder<double>(double num, double denom) {
    return remainder(num, denom);
}

template<>
math_def inline float16 nd4j_remainder<float16>(float16 num, float16 denom) {
    return (float16) remainderf((float) num, (float) denom);
}

// fmod (truncated-quotient remainder) specializations.
template<>
math_def inline float nd4j_fmod<float>(float num, float denom) {
    return fmodf(num, denom);
}

template<>
math_def inline double nd4j_fmod<double>(double num, double denom) {
    return fmod(num, denom);
}

template<>
math_def inline float16 nd4j_fmod<float16>(float16 num, float16 denom) {
    return (float16) fmodf((float) num, (float) denom);
}
// Error-function specializations.
template<>
math_def inline float nd4j_erf<float>(float num) {
    return erff(num);
}

template<>
math_def inline double nd4j_erf<double>(double num) {
    return erf(num);
}

template<>
math_def inline float16 nd4j_erf<float16>(float16 num) {
    return (float16) erff((float) num);
}

// Complementary error-function specializations.
template<>
math_def inline float nd4j_erfc<float>(float num) {
    return erfcf(num);
}

template<>
math_def inline double nd4j_erfc<double>(double num) {
    return erfc(num);
}

template<>
math_def inline float16 nd4j_erfc<float16>(float16 num) {
    return (float16) erfcf((float) num);
}

// Remaining round specializations (float16/float variants are defined above).
template<>
math_def inline double nd4j_round<double>(double val) {
    return round(val);
}

// int specialization computes in float; the result truncates back to int.
template<>
math_def inline int nd4j_round<int>(int val) {
    return round((float) val);
}
// Sine specializations; float16 uses the native half intrinsic if present.
template<>
math_def inline float16 nd4j_sin<float16>(float16 val) {
#ifdef NATIVE_HALFS
    return hsin(val.data);
#else
    return (float16) sinf((float) val);
#endif
}

template<>
math_def inline float nd4j_sin<float>(float val) {
    return sinf(val);
}

template<>
math_def inline double nd4j_sin<double>(double val) {
    return sin(val);
}

// int specialization computes in double (sin), truncating back to int.
template<>
math_def inline int nd4j_sin<int>(int val) {
    return sin((float) val);
}
// Hyperbolic sine for float16.
// BUG FIX: the NATIVE_HALFS branch called hsin() — the half-precision *sine*
// intrinsic — instead of a hyperbolic sine. CUDA provides no native half
// sinh, so always compute through float (sinhf keeps the math in single
// precision, matching the other non-native float16 specializations).
template<>
math_def inline float16 nd4j_sinh<float16>(float16 val) {
    return (float16) sinhf((float) val);
}
template<>
math_def inline float nd4j_sinh<float>(float val) {
    return sinhf(val);
}

template<>
math_def inline double nd4j_sinh<double>(double val) {
    return sinh(val);
}

// int specialization computes in float; the result truncates back to int.
template<>
math_def inline int nd4j_sinh<int>(int val) {
    return sinhf((float) val);
}

// Square-root specializations; float16 uses the native half intrinsic if present.
template<>
math_def inline float16 nd4j_sqrt<float16>(float16 val) {
#ifdef NATIVE_HALFS
    return hsqrt(val.data);
#else
    return (float16) sqrtf((float) val);
#endif
}

template<>
math_def inline float nd4j_sqrt<float>(float val) {
    return sqrtf(val);
}

template<>
math_def inline double nd4j_sqrt<double>(double val) {
    return sqrt(val);
}

// int specialization computes in float; the result truncates back to int.
template<>
math_def inline int nd4j_sqrt<int>(int val) {
    return sqrtf((float) val);
}
// Hyperbolic tangent specializations (no native half intrinsic is used here;
// float16 always goes through float).
template<>
math_def inline float16 nd4j_tanh<float16>(float16 val) {
    return static_cast<float16>(tanhf(static_cast<float>(val)));
}

template<>
math_def inline float nd4j_tanh<float>(float val) {
    return tanhf(val);
}

template<>
math_def inline double nd4j_tanh<double>(double val) {
    return tanh(val);
}

// The int specialization truncates the single-precision result.
template<>
math_def inline int nd4j_tanh<int>(int val) {
    return tanhf(static_cast<float>(val));
}
// Tangent specializations.
template<>
math_def inline float16 nd4j_tan<float16>(float16 val) {
    return static_cast<float16>(tanf(static_cast<float>(val)));
}

template<>
math_def inline float nd4j_tan<float>(float val) {
    return tanf(val);
}

template<>
math_def inline double nd4j_tan<double>(double val) {
    return tan(val);
}

// The int specialization truncates the single-precision result.
template<>
math_def inline int nd4j_tan<int>(int val) {
    return tanf(static_cast<float>(val));
}
// Inverse cosine specializations.
template<>
math_def inline float16 nd4j_acos<float16>(float16 val) {
    return static_cast<float16>(acosf(static_cast<float>(val)));
}

template<>
math_def inline float nd4j_acos<float>(float val) {
    return acosf(val);
}

template<>
math_def inline double nd4j_acos<double>(double val) {
    return acos(val);
}

// The int specialization truncates the single-precision result.
template<>
math_def inline int nd4j_acos<int>(int val) {
    return acosf(static_cast<float>(val));
}
// Inverse hyperbolic cosine specializations.
template<>
math_def inline float16 nd4j_acosh<float16>(float16 val) {
    return (float16) acoshf((float) val);
}

template<>
math_def inline float nd4j_acosh<float>(float val) {
    return acoshf(val);
}

// BUGFIX: this specialization previously called acos() — the inverse
// *cosine* — instead of acosh(), silently computing the wrong function
// for double inputs.
template<>
math_def inline double nd4j_acosh<double>(double val) {
    return acosh(val);
}

template<>
math_def inline int nd4j_acosh<int>(int val) {
    return acoshf((float) val);
}
// Inverse sine specializations.
template<>
math_def inline float16 nd4j_asin<float16>(float16 val) {
    return static_cast<float16>(asinf(static_cast<float>(val)));
}

template<>
math_def inline float nd4j_asin<float>(float val) {
    return asinf(val);
}

template<>
math_def inline double nd4j_asin<double>(double val) {
    return asin(val);
}

// The int specialization truncates the single-precision result.
template<>
math_def inline int nd4j_asin<int>(int val) {
    return asinf(static_cast<float>(val));
}
// Inverse tangent specializations.
template<>
math_def inline float16 nd4j_atan<float16>(float16 val) {
    return static_cast<float16>(atanf(static_cast<float>(val)));
}

template<>
math_def inline float nd4j_atan<float>(float val) {
    return atanf(val);
}

template<>
math_def inline double nd4j_atan<double>(double val) {
    return atan(val);
}

// The int specialization truncates the single-precision result.
template<>
math_def inline int nd4j_atan<int>(int val) {
    return atanf(static_cast<float>(val));
}
// Inverse hyperbolic tangent specializations.
template<>
math_def inline float16 nd4j_atanh<float16>(float16 val) {
    return static_cast<float16>(atanhf(static_cast<float>(val)));
}

template<>
math_def inline float nd4j_atanh<float>(float val) {
    return atanhf(val);
}

template<>
math_def inline double nd4j_atanh<double>(double val) {
    return atanh(val);
}

// The int specialization truncates the single-precision result.
template<>
math_def inline int nd4j_atanh<int>(int val) {
    return atanhf(static_cast<float>(val));
}
// Exchanges the contents of two references of the same type via a temporary.
template<typename T>
math_def inline void nd4j_swap(T &val1, T &val2) {
    T tmp(val1);
    val1 = val2;
    val2 = tmp;
}
#ifdef __CUDACC__
namespace atomics {
/* CAS-based atomic arithmetic helpers for device code. Each routine returns
 * the value stored at *address immediately before the update, mirroring the
 * convention of CUDA's built-in atomics. */
template <typename T>
inline __device__ T nd4j_atomicAdd(T* address, T val);
template <typename T>
inline __device__ T nd4j_atomicSub(T* address, T val);
template <typename T>
inline __device__ T nd4j_atomicMul(T* address, T val);
template <typename T>
inline __device__ T nd4j_atomicDiv(T* address, T val);

// double add emulated with a 64-bit CAS loop (hardware atomicAdd(double*)
// requires newer architectures).
template <>
inline __device__ double nd4j_atomicAdd<double>(double* address, double val) {
    unsigned long long int* address_as_ull = (unsigned long long int *) address;
    unsigned long long int old = *address_as_ull, assumed;
    do {
        assumed = old;
        old = atomicCAS(address_as_ull, assumed,
                        __double_as_longlong(val + __longlong_as_double(assumed)));
    } while (assumed != old);
    return __longlong_as_double(old);
}

// Half add: there is no 16-bit atomicCAS, so operate on the enclosing
// 32-bit word. When the half lives in the upper/lower part of that word,
// recompute only that part and CAS the whole word.
// NOTE(review): the misaligned case aligns the word pointer down by 2 bytes
// (addr - 2) and relies on the PAIR union declared elsewhere — confirm the
// layout matches the target endianness.
template <>
inline __device__ float16 nd4j_atomicAdd<float16>(float16* address, float16 val) {
    int* address_as_ull = (int*) address;
    long addr = (long) address;
    bool misaligned = addr & 0x3;
    if (misaligned)
        address_as_ull = (int *) (addr - 2);
    PAIR old, assumed, fresh;
    old.W = *address_as_ull;
    do {
        if (!misaligned) {
            float16 res = ((float16) old.B.H) + val;
            fresh.B.H = res.data;
            fresh.B.L = old.B.L;
        } else {
            float16 res = ((float16) old.B.L) + val;
            fresh.B.L = res.data;
            fresh.B.H = old.B.H;
        }
        assumed.W = old.W;
        old.W = atomicCAS(address_as_ull, assumed.W, fresh.W);
    } while (assumed.W != old.W);
    if (!misaligned) return old.B.H;
    else return old.B.L;
}

// NOTE(review): this stores (val - old), the reverse of CUDA atomicSub's
// (old - val) convention. The float specialization below agrees, so the
// ordering is preserved as-is — confirm it is intended.
template <>
inline __device__ double nd4j_atomicSub<double>(double* address, double val) {
    unsigned long long int* address_as_ull = (unsigned long long int *) address;
    unsigned long long int old = *address_as_ull, assumed;
    do {
        assumed = old;
        old = atomicCAS(address_as_ull, assumed,
                        __double_as_longlong(val - __longlong_as_double(assumed)));
    } while (assumed != old);
    return __longlong_as_double(old);
}

template <>
inline __device__ double nd4j_atomicMul<double>(double* address, double val) {
    unsigned long long int* address_as_ull = (unsigned long long int*) address;
    unsigned long long int old = *address_as_ull, assumed;
    do {
        assumed = old;
        old = atomicCAS(address_as_ull, assumed,
                        __double_as_longlong(val * __longlong_as_double(assumed)));
    } while (assumed != old);
    return __longlong_as_double(old);
}

// NOTE(review): stores (val / old); confirm the intended operand order.
template <>
inline __device__ double nd4j_atomicDiv<double>(double* address, double val) {
    unsigned long long int* address_as_ull = (unsigned long long int*) address;
    unsigned long long int old = *address_as_ull, assumed;
    do {
        assumed = old;
        old = atomicCAS(address_as_ull, assumed,
                        __double_as_longlong(val / __longlong_as_double(assumed)));
    } while (assumed != old);
    return __longlong_as_double(old);
}

// float add is natively supported on all architectures.
template <>
inline __device__ float nd4j_atomicAdd<float>(float* address, float val) {
    return atomicAdd(address, val);
}

template <>
inline __device__ float nd4j_atomicSub<float>(float* address, float val) {
    int* address_as_int = (int*) address;
    int old = *address_as_int, assumed;
    do {
        assumed = old;
        // BUGFIX: 'assumed' holds the *bit pattern* of the stored float and
        // must be reinterpreted with __int_as_float before arithmetic; the
        // previous code fed the raw integer bits into the subtraction.
        old = atomicCAS(address_as_int, assumed,
                        __float_as_int(val - __int_as_float(assumed)));
    } while (assumed != old);
    return __int_as_float(old);
}

template <>
inline __device__ float nd4j_atomicMul<float>(float* address, float val) {
    int* address_as_int = (int*) address;
    int old = *address_as_int, assumed;
    do {
        assumed = old;
        // BUGFIX: reinterpret 'assumed' as float (was __float_as_int, which
        // multiplied by the raw bit pattern).
        old = atomicCAS(address_as_int, assumed,
                        __float_as_int(val * __int_as_float(assumed)));
    } while (assumed != old);
    return __int_as_float(old);
}

template <>
inline __device__ float nd4j_atomicDiv<float>(float* address, float val) {
    int* address_as_int = (int*) address;
    int old = *address_as_int, assumed;
    do {
        assumed = old;
        // BUGFIX: this "Div" previously *multiplied* and also used the raw bit
        // pattern of 'assumed'. Now divides, matching the double specialization.
        old = atomicCAS(address_as_int, assumed,
                        __float_as_int(val / __int_as_float(assumed)));
    } while (assumed != old);
    return __int_as_float(old);
}
}
#endif
}
}
#endif /* TEMPLATEMATH_H_ */ |
par_cheby.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Chebyshev setup and solve
*
*****************************************************************************/
#include "_hypre_parcsr_ls.h"
#include "_hypre_parcsr_mv.h"
#include "float.h"
/******************************************************************************
Chebyshev relaxation
Can specify order 1-4 (this is the order of the resid polynomial)- here we
explicitly code the coefficients (instead of
iteratively determining)
variant 0: standard chebyshev
this is rlx 11 if scale = 0, and 16 if scale == 1
variant 1: modified cheby: T(t)* f(t) where f(t) = (1-b/t)
this is rlx 15 if scale = 0, and 17 if scale == 1
ratio indicates the percentage of the whole spectrum to use (so .5
means half, and .1 means 10percent)
*******************************************************************************/
/**
* @brief Setups of coefficients (and optional diagonal scaling elements) for
* Chebyshev relaxation
*
* Will calculate ds_ptr on device/host depending on where A is located
*
 * @param[in] A Matrix for which to setup
 * @param[in] max_eig Maximum eigenvalue
 * @param[in] min_eig Minimum eigenvalue
* @param[in] fraction Fraction used to calculate lower bound
* @param[in] order Polynomial order to use [1,4]
* @param[in] scale Whether or not to scale by the diagonal
* @param[in] variant Whether or not to use a variant of Chebyshev (0 standard, 1 variant)
* @param[out] coefs_ptr *coefs_ptr will be allocated to contain coefficients of the polynomial
* @param[out] ds_ptr *ds_ptr will be allocated to allow scaling by the diagonal
*/
HYPRE_Int
hypre_ParCSRRelax_Cheby_Setup(hypre_ParCSRMatrix *A, /* matrix to relax with */
                              HYPRE_Real max_eig,
                              HYPRE_Real min_eig,
                              HYPRE_Real fraction,
                              HYPRE_Int order, /* polynomial order */
                              HYPRE_Int scale, /* scale by diagonal?*/
                              HYPRE_Int variant,
                              HYPRE_Real **coefs_ptr,
                              HYPRE_Real **ds_ptr) /* initial/updated approximation */
{
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real theta, delta;
   HYPRE_Real den;
   HYPRE_Real upper_bound = 0.0, lower_bound = 0.0;
   HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Real *coefs = NULL;
   HYPRE_Int cheby_order;
   HYPRE_Real *ds_data = NULL;

   /* u = u + p(A)r */

   /* Only orders 1..4 are hard-coded below; clamp anything outside. */
   if (order > 4)
   {
      order = 4;
   }
   if (order < 1)
   {
      order = 1;
   }

   /* p(A) has order+1 coefficients; freed by the caller. */
   coefs = hypre_CTAlloc(HYPRE_Real, order + 1, HYPRE_MEMORY_HOST);

   /* we are using the order of p(A) */
   cheby_order = order - 1;

   if (min_eig >= 0.0)
   {
      /* make sure we are large enough - Adams et al. 2003 */
      upper_bound = max_eig * 1.1;
      /* lower_bound = max_eig/fraction; */
      lower_bound = (upper_bound - min_eig) * fraction + min_eig;
   }
   else if (max_eig <= 0.0)
   {
      /* Entirely non-positive spectrum: mirror of the branch above. */
      upper_bound = min_eig * 1.1;
      lower_bound = max_eig - (max_eig - upper_bound) * fraction;
   }
   /* NOTE(review): if min_eig < 0 < max_eig, neither branch runs and
    * theta = delta = 0, so the divisions below divide by zero. Presumably
    * callers guarantee a (semi-)definite spectrum — confirm. */

   /* theta and delta: center and half-width of the damped interval */
   theta = (upper_bound + lower_bound) / 2;
   delta = (upper_bound - lower_bound) / 2;

   if (variant == 1)
   {
      /* Modified Chebyshev: T(t)*f(t) with f(t) = (1 - b/t); coefficients
       * below were derived symbolically for each supported order. */
      switch ( cheby_order ) /* these are the corresponding cheby polynomials: u = u_o + s(A)r_0 - so order is
                                one less than resid poly: r(t) = 1 - t*s(t) */
      {
         case 0:
            coefs[0] = 1.0 / theta;
            break;

         case 1: /* (del - t + 2*th)/(th^2 + del*th) */
            den = (theta * theta + delta * theta);
            coefs[0] = (delta + 2 * theta) / den;
            coefs[1] = -1.0 / den;
            break;

         case 2: /* (4*del*th - del^2 - t*(2*del + 6*th) + 2*t^2 + 6*th^2)/(2*del*th^2 - del^2*th - del^3 + 2*th^3)*/
            den = 2 * delta * theta * theta - delta * delta * theta - pow(delta, 3) + 2 * pow(theta, 3);
            coefs[0] = (4 * delta * theta - pow(delta, 2) + 6 * pow(theta, 2)) / den;
            coefs[1] = -(2 * delta + 6 * theta) / den;
            coefs[2] = 2 / den;
            break;

         case 3: /* -(6*del^2*th - 12*del*th^2 - t^2*(4*del + 16*th) + t*(12*del*th - 3*del^2 + 24*th^2) + 3*del^3 + 4*t^3 - 16*th^3)/(4*del*th^3 - 3*del^2*th^2 - 3*del^3*th + 4*th^4)*/
            den = - (4 * delta * pow(theta, 3) - 3 * pow(delta, 2) * pow(theta, 2) - 3 * pow(delta,
                                                                                             3) * theta + 4 * pow(theta,
                                                                                                                  4) );
            coefs[0] = (6 * pow(delta, 2) * theta - 12 * delta * pow(theta, 2) + 3 * pow(delta,
                                                                                         3) - 16 * pow(theta, 3) ) / den;
            coefs[1] = (12 * delta * theta - 3 * pow(delta, 2) + 24 * pow(theta, 2)) / den;
            coefs[2] = -( 4 * delta + 16 * theta) / den;
            coefs[3] = 4 / den;
            break;
      }
   }
   else /* standard chebyshev */
   {
      switch ( cheby_order ) /* these are the corresponding cheby polynomials: u = u_o + s(A)r_0 - so order is
                                one less than resid poly: r(t) = 1 - t*s(t) */
      {
         case 0:
            coefs[0] = 1.0 / theta;
            break;

         case 1: /* ( 2*t - 4*th)/(del^2 - 2*th^2) */
            den = delta * delta - 2 * theta * theta;
            coefs[0] = -4 * theta / den;
            coefs[1] = 2 / den;
            break;

         case 2: /* (3*del^2 - 4*t^2 + 12*t*th - 12*th^2)/(3*del^2*th - 4*th^3)*/
            den = 3 * (delta * delta) * theta - 4 * (theta * theta * theta);
            coefs[0] = (3 * delta * delta - 12 * theta * theta) / den;
            coefs[1] = 12 * theta / den;
            coefs[2] = -4 / den;
            break;

         case 3: /*(t*(8*del^2 - 48*th^2) - 16*del^2*th + 32*t^2*th - 8*t^3 + 32*th^3)/(del^4 - 8*del^2*th^2 + 8*th^4)*/
            den = pow(delta, 4) - 8 * delta * delta * theta * theta + 8 * pow(theta, 4);
            coefs[0] = (32 * pow(theta, 3) - 16 * delta * delta * theta) / den;
            coefs[1] = (8 * delta * delta - 48 * theta * theta) / den;
            coefs[2] = 32 * theta / den;
            coefs[3] = -8 / den;
            break;
      }
   }
   *coefs_ptr = coefs;

   if (scale)
   {
      /*grab 1/sqrt(abs(diagonal)) */
      /* ds_data lives in A's memory location (host or device); mode 4 of the
       * extraction routine produces 1/sqrt(abs(diag)) per the comment above. */
      ds_data = hypre_CTAlloc(HYPRE_Real, num_rows, hypre_ParCSRMatrixMemoryLocation(A));
      hypre_CSRMatrixExtractDiagonal(hypre_ParCSRMatrixDiag(A), ds_data, 4);
   } /* end of scaling code */
   *ds_ptr = ds_data;

   return hypre_error_flag;
}
/**
* @brief Solve using a chebyshev polynomial on the host
*
* @param[in] A Matrix to relax with
* @param[in] f right-hand side
* @param[in] ds_data Diagonal information
* @param[in] coefs Polynomial coefficients
* @param[in] order Order of the polynomial
* @param[in] scale Whether or not to scale by diagonal
 * @param[in] variant Whether or not to use a variant
* @param[in,out] u Initial/updated approximation
* @param[in] v Temp vector
* @param[in] r Temp Vector
* @param[in] orig_u_vec Temp Vector
* @param[in] tmp Temp Vector
*/
HYPRE_Int
hypre_ParCSRRelax_Cheby_SolveHost(hypre_ParCSRMatrix *A, /* matrix to relax with */
                                  hypre_ParVector *f, /* right-hand side */
                                  HYPRE_Real *ds_data,
                                  HYPRE_Real *coefs,
                                  HYPRE_Int order, /* polynomial order */
                                  HYPRE_Int scale, /* scale by diagonal?*/
                                  HYPRE_Int variant,
                                  hypre_ParVector *u, /* initial/updated approximation */
                                  hypre_ParVector *v, /* temporary vector */
                                  hypre_ParVector *r, /* another vector */
                                  hypre_ParVector *orig_u_vec, /*another temp vector */
                                  hypre_ParVector *tmp_vec) /*a potential temp vector */
{
   /* Host-side Chebyshev relaxation: u <- u + p(A) r, where p has the
    * coefficients computed by hypre_ParCSRRelax_Cheby_Setup.
    * NOTE(review): 'variant' is not referenced in this body — presumably kept
    * for signature parity with the device implementation; confirm. */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *u_data = hypre_VectorData(hypre_ParVectorLocalVector(u));
   HYPRE_Real *f_data = hypre_VectorData(hypre_ParVectorLocalVector(f));
   HYPRE_Real *v_data = hypre_VectorData(hypre_ParVectorLocalVector(v));
   HYPRE_Real *r_data = hypre_VectorData(hypre_ParVectorLocalVector(r));

   HYPRE_Int i, j;
   HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);

   HYPRE_Real mult;
   HYPRE_Real *orig_u;

   HYPRE_Int cheby_order;

   HYPRE_Real *tmp_data;

   /* u = u + p(A)r */

   /* Clamp order to the supported range [1,4] (must match the Setup). */
   if (order > 4)
   {
      order = 4;
   }
   if (order < 1)
   {
      order = 1;
   }

   /* we are using the order of p(A) */
   cheby_order = order - 1;

   hypre_assert(hypre_VectorSize(hypre_ParVectorLocalVector(orig_u_vec)) >= num_rows);
   orig_u = hypre_VectorData(hypre_ParVectorLocalVector(orig_u_vec));

   if (!scale)
   {
      /* get residual: r = f - A*u */
      hypre_ParVectorCopy(f, r);
      hypre_ParCSRMatrixMatvec(-1.0, A, u, 1.0, r);

      /* o = u; u = r .* coef */
      /* Save the incoming iterate, then seed Horner's scheme with the
       * highest-order coefficient. */
      for ( i = 0; i < num_rows; i++ )
      {
         orig_u[i] = u_data[i];
         u_data[i] = r_data[i] * coefs[cheby_order];
      }

      /* Horner evaluation of p(A)r: u <- A*u + coefs[i]*r, descending i. */
      for (i = cheby_order - 1; i >= 0; i-- )
      {
         hypre_ParCSRMatrixMatvec(1.0, A, u, 0.0, v);
         mult = coefs[i];
         /* u = mult * r + v */
#ifdef HYPRE_USING_OPENMP
         #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
         for ( j = 0; j < num_rows; j++ )
         {
            u_data[j] = mult * r_data[j] + v_data[j];
         }
      }

      /* u = o + u */
#ifdef HYPRE_USING_OPENMP
      #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for ( i = 0; i < num_rows; i++ )
      {
         u_data[i] = orig_u[i] + u_data[i];
      }
   }
   else /* scaling! */
   {
      /*grab 1/sqrt(diagonal) */
      tmp_data = hypre_VectorData(hypre_ParVectorLocalVector(tmp_vec));

      /* get ds_data and get scaled residual: r = D^(-1/2)f -
         D^(-1/2)A*u */
      hypre_ParCSRMatrixMatvec(-1.0, A, u, 0.0, tmp_vec);
      /* r = ds .* (f + tmp) */
      /* note: tmp currently holds -A*u, so (f + tmp) is the residual */
#ifdef HYPRE_USING_OPENMP
      #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
      for ( j = 0; j < num_rows; j++ )
      {
         r_data[j] = ds_data[j] * (f_data[j] + tmp_data[j]);
      }

      /* save original u, then start
         the iteration by multiplying r by the cheby coef.*/

      /* o = u; u = r * coef */
#ifdef HYPRE_USING_OPENMP
      #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
      for ( j = 0; j < num_rows; j++ )
      {
         orig_u[j] = u_data[j]; /* orig, unscaled u */
         u_data[j] = r_data[j] * coefs[cheby_order];
      }

      /* now do the other coefficients */
      /* Same Horner scheme, but each matvec is wrapped in the symmetric
       * diagonal scaling D^(-1/2) A D^(-1/2). */
      for (i = cheby_order - 1; i >= 0; i-- )
      {
         /* v = D^(-1/2)AD^(-1/2)u */
         /* tmp = ds .* u */
#ifdef HYPRE_USING_OPENMP
         #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
         for ( j = 0; j < num_rows; j++ )
         {
            tmp_data[j] = ds_data[j] * u_data[j];
         }
         hypre_ParCSRMatrixMatvec(1.0, A, tmp_vec, 0.0, v);

         /* u_new = coef*r + v*/
         mult = coefs[i];

         /* u = coef * r + ds .* v */
#ifdef HYPRE_USING_OPENMP
         #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
         for ( j = 0; j < num_rows; j++ )
         {
            u_data[j] = mult * r_data[j] + ds_data[j] * v_data[j];
         }
      } /* end of cheby_order loop */

      /* now we have to scale u_data before adding it to u_orig*/

      /* u = orig_u + ds .* u */
#ifdef HYPRE_USING_OPENMP
      #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
      for ( j = 0; j < num_rows; j++ )
      {
         u_data[j] = orig_u[j] + ds_data[j] * u_data[j];
      }
   }/* end of scaling code */

   return hypre_error_flag;
}
/**
* @brief Solve using a chebyshev polynomial
*
* Determines whether to solve on host or device
*
* @param[in] A Matrix to relax with
* @param[in] f right-hand side
* @param[in] ds_data Diagonal information
* @param[in] coefs Polynomial coefficients
* @param[in] order Order of the polynomial
* @param[in] scale Whether or not to scale by diagonal
 * @param[in] variant Whether or not to use a variant
* @param[in,out] u Initial/updated approximation
* @param[out] v Temp vector
* @param[out] r Temp Vector
* @param[out] orig_u_vec Temp Vector
* @param[out] tmp_vec Temp Vector
*/
/**
 * Dispatches the Chebyshev relaxation to the host or device implementation
 * based on where A's data lives. All arguments are forwarded unchanged.
 */
HYPRE_Int
hypre_ParCSRRelax_Cheby_Solve(hypre_ParCSRMatrix *A, /* matrix to relax with */
                              hypre_ParVector *f, /* right-hand side */
                              HYPRE_Real *ds_data,
                              HYPRE_Real *coefs,
                              HYPRE_Int order, /* polynomial order */
                              HYPRE_Int scale, /* scale by diagonal?*/
                              HYPRE_Int variant,
                              hypre_ParVector *u, /* initial/updated approximation */
                              hypre_ParVector *v, /* temporary vector */
                              hypre_ParVector *r, /*another temp vector */
                              hypre_ParVector *orig_u_vec, /*another temp vector */
                              hypre_ParVector *tmp_vec) /*another temp vector */
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_GpuProfilingPushRange("ParCSRRelaxChebySolve");
#endif

   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1(hypre_ParCSRMatrixMemoryLocation(A));
   HYPRE_Int ierr = 0;

   if (exec == HYPRE_EXEC_HOST)
   {
      ierr = hypre_ParCSRRelax_Cheby_SolveHost(A, f, ds_data, coefs, order, scale, variant, u, v, r,
                                               orig_u_vec, tmp_vec);
   }
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   else
   {
      ierr = hypre_ParCSRRelax_Cheby_SolveDevice(A, f, ds_data, coefs, order, scale, variant, u, v, r,
                                                 orig_u_vec, tmp_vec);
   }

   /* merged with the preceding guard: the pop belongs to the same
    * GPU-enabled configuration as the else-branch above */
   hypre_GpuProfilingPopRange();
#endif

   return ierr;
}
|
convolution_pack1to8_int8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Direct int8 convolution from pack-1 input to pack-8 output (NEON).
// For each output position, accumulates kernel_w*kernel_h*channels products
// into 8 parallel int32 sums (one per packed output channel) and stores them
// as two int32x4 lanes. Assumes top_blob holds int32 with elempack 8 and
// weight_data_int8 is laid out as [outch][channels*maxk*8].
static void convolution_pack1to8_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_int8, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const int maxk = kernel_w * kernel_h;

    // kernel offsets
    // Precompute, for each of the maxk taps, the flat offset (in elements)
    // from the sliding-window origin, accounting for dilation; 'gap' skips to
    // the next kernel row within the input.
    std::vector<int> _space_ofs(maxk);
    int* space_ofs = &_space_ofs[0];
    {
        int p1 = 0;
        int p2 = 0;
        int gap = w * dilation_h - kernel_w * dilation_w;
        for (int i = 0; i < kernel_h; i++)
        {
            for (int j = 0; j < kernel_w; j++)
            {
                space_ofs[p1] = p2;
                p1++;
                p2 += dilation_w;
            }
            p2 += gap;
        }
    }

    // num_output
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        int* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // Two int32x4 accumulators = 8 output channels.
                int32x4_t _sum0 = vdupq_n_s32(0);
                int32x4_t _sum1 = vdupq_n_s32(0);

                const signed char* kptr = weight_data_int8.channel(p);

                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);
                    const signed char* sptr = m.row<const signed char>(i * stride_h) + j * stride_w;

                    for (int k = 0; k < maxk; k++)
                    {
                        // Broadcast one input sample against 8 weights, widen
                        // multiply to int16, then widen-accumulate into int32.
                        int8x8_t _val = vdup_n_s8(sptr[space_ofs[k]]);
                        int8x8_t _w = vld1_s8(kptr);
                        int16x8_t _s0 = vmull_s8(_val, _w);
                        _sum0 = vaddw_s16(_sum0, vget_low_s16(_s0));
                        _sum1 = vaddw_s16(_sum1, vget_high_s16(_s0));

                        kptr += 8;
                    }
                }

                vst1q_s32(outptr + j * 8, _sum0);
                vst1q_s32(outptr + j * 8 + 4, _sum1);
            }

            outptr += outw * 8;
        }
    }
}
|
gt.mapset.c | /*
* PROJECT: GEM-Tools library
* FILE: gt.mapset.c
* DATE: 08/11/2012
* AUTHOR(S): Santiago Marco-Sola <santiagomsola@gmail.com>
* DESCRIPTION: Utility to perform set operations {UNION,INTERSECTION,DIFFERENCE} over alignment files {MAP,SAM}
*/
#include <getopt.h>
#ifdef HAVE_OPENMP
#include <omp.h>
#endif
#include "gem_tools.h"
/* Supported set/compare operations over a pair of alignment files. */
typedef enum { GT_MAP_SET_UNKNOWN,
               GT_MAP_SET_INTERSECTION, GT_MAP_SET_UNION, GT_MAP_SET_DIFFERENCE,
               GT_MAP_SET_JOIN, GT_MAP_SET_COMPARE,
               GT_MERGE_MAP, GT_DISPLAY_COMPACT_MAP} gt_operation;

/* Command-line configuration for the tool. */
typedef struct {
  gt_operation operation;          /* selected set/compare operation */
  char* name_input_file_1;         /* master input path (NULL => stdin fallback logic) */
  char* name_input_file_2;         /* slave input path (NULL => stdin) */
  char* name_output_file;          /* output path (NULL => stdout) */
  bool mmap_input;                 /* memory-map input files instead of streaming */
  bool paired_end;                 /* parse templates as paired-end */
  bool files_contain_same_reads;   /* inputs are read-synchronized (merge mode) */
  double eq_threshold;             /* map-equality slack; <=1.0 means fraction of read length */
  bool strict;                     /* use exact map comparison instead of ranged */
  bool verbose;
  uint64_t num_threads;
} gt_stats_args;
/* Global tool configuration with defaults; overwritten by argument parsing. */
gt_stats_args parameters = {
    .operation=GT_MAP_SET_UNKNOWN,
    .name_input_file_1=NULL,
    .name_input_file_2=NULL,
    .name_output_file=NULL,
    .mmap_input=false,
    .paired_end=false,
    .files_contain_same_reads=false,
    .eq_threshold=0.5,
    .strict=false,
    .verbose=false,
    .num_threads=1
};
/* Total length of the template currently being processed; consumed by the
 * comparison callbacks to turn a fractional eq_threshold into bases. */
uint64_t current_read_length;
/*
 * Compares two single maps. A threshold <= 1.0 is interpreted as a fraction
 * of the current read length; larger values are taken as absolute bases.
 * Strict mode ignores the threshold and compares maps exactly.
 */
int64_t gt_mapset_map_cmp(gt_map* const map_1,gt_map* const map_2) {
  uint64_t eq_threshold;
  if (parameters.eq_threshold <= 1.0) {
    eq_threshold = parameters.eq_threshold*current_read_length;
  } else {
    eq_threshold = parameters.eq_threshold;
  }
  if (parameters.strict) {
    return gt_map_cmp(map_1,map_2);
  }
  return gt_map_range_cmp(map_1,map_2,eq_threshold);
}
/*
 * Multi-map (paired) counterpart of gt_mapset_map_cmp: same threshold
 * interpretation, dispatching to the exact or ranged mmap comparison.
 */
int64_t gt_mapset_mmap_cmp(gt_map** const map_1,gt_map** const map_2,const uint64_t num_maps) {
  uint64_t eq_threshold;
  if (parameters.eq_threshold <= 1.0) {
    eq_threshold = parameters.eq_threshold*current_read_length;
  } else {
    eq_threshold = parameters.eq_threshold;
  }
  if (parameters.strict) {
    return gt_mmap_cmp(map_1,map_2,num_maps);
  }
  return gt_mmap_range_cmp(map_1,map_2,num_maps,eq_threshold);
}
/*
 * Reads one template from master and slave, keeping both streams synchronized
 * by tag/pair. Master records with no slave counterpart are dumped to the
 * output for UNION/DIFFERENCE. Returns GT_IMP_OK with both templates aligned,
 * or GT_IMP_EOF once the master stream is exhausted.
 */
GT_INLINE gt_status gt_mapset_read_template_sync(
    gt_buffered_input_file* const buffered_input_master,gt_buffered_input_file* const buffered_input_slave,
    gt_buffered_output_file* const buffered_output,gt_template* const template_master,gt_template* const template_slave,
    const gt_operation operation) {
  gt_status error_code_master, error_code_slave;
  /* BUGFIX: these attribute objects are allocated on every call (this function
   * runs once per template) and were never freed — a per-record memory leak.
   * Deleter names follow the gem-tools _new/_delete convention — confirm. */
  gt_output_map_attributes* output_attributes = gt_output_map_attributes_new();
  gt_generic_parser_attributes* generic_parser_attr = gt_input_generic_parser_attributes_new(parameters.paired_end);
  // Read master
  if ((error_code_master=gt_input_generic_parser_get_template(
      buffered_input_master,template_master,generic_parser_attr))==GT_IMP_FAIL) {
    gt_fatal_error_msg("Fatal error parsing file <<Master>>");
  }
  // Read slave
  if ((error_code_slave=gt_input_generic_parser_get_template(
      buffered_input_slave,template_slave,generic_parser_attr))==GT_IMP_FAIL) {
    gt_fatal_error_msg("Fatal error parsing file <<Slave>>");
  }
  // Check EOF conditions
  if (error_code_master==GT_IMP_EOF) {
    if (error_code_slave!=GT_IMP_EOF) {
      gt_fatal_error_msg("<<Slave>> contains more/different reads from <<Master>>");
    }
    gt_output_map_attributes_delete(output_attributes);
    gt_input_generic_parser_attributes_delete(generic_parser_attr);
    return GT_IMP_EOF;
  } else if (error_code_slave==GT_IMP_EOF) { // Slave exhausted. Dump master & return EOF
    do {
      if (error_code_master==GT_IMP_FAIL) gt_fatal_error_msg("Fatal error parsing file <<Master>>");
      if (operation==GT_MAP_SET_UNION || operation==GT_MAP_SET_DIFFERENCE) {
        gt_output_map_bofprint_template(buffered_output,template_master,output_attributes);
      }
    } while ((error_code_master=gt_input_generic_parser_get_template(
        buffered_input_master,template_master,generic_parser_attr)));
    gt_output_map_attributes_delete(output_attributes);
    gt_input_generic_parser_attributes_delete(generic_parser_attr);
    return GT_IMP_EOF;
  }
  // Synch loop: advance master until its tag/pair matches the slave's
  while (!(gt_streq(gt_template_get_tag(template_master),gt_template_get_tag(template_slave)) &&
      gt_template_get_pair(template_master)==gt_template_get_pair(template_slave))) {
    // Print non correlative master's template
    if (operation==GT_MAP_SET_UNION || operation==GT_MAP_SET_DIFFERENCE) {
      gt_output_map_bofprint_template(buffered_output,template_master,output_attributes);
    }
    // Fetch next master's template
    if ((error_code_master=gt_input_generic_parser_get_template(
        buffered_input_master,template_master,generic_parser_attr))!=GT_IMP_OK) {
      gt_fatal_error_msg("<<Slave>> contains more/different reads from <<Master>>");
    }
  }
  gt_output_map_attributes_delete(output_attributes);
  gt_input_generic_parser_attributes_delete(generic_parser_attr);
  return GT_IMP_OK;
}
/*
 * Reads the next master template and advances the slave stream until its
 * tag/pair matches. Returns GT_IMP_OK with both templates matched, or
 * GT_IMP_EOF when the master stream is exhausted. Aborts with a fatal error
 * if the slave does not contain the master's reads.
 */
GT_INLINE gt_status gt_mapset_read_template_get_commom_map(
    gt_buffered_input_file* const buffered_input_master,gt_buffered_input_file* const buffered_input_slave,
    gt_template* const template_master,gt_template* const template_slave) {
  gt_status error_code_master, error_code_slave;
  /* BUGFIX: the parser attributes were allocated per call and never freed —
   * a per-record leak. Deleter name per gem-tools convention — confirm. */
  gt_generic_parser_attributes* generic_parser_attr = gt_input_generic_parser_attributes_new(parameters.paired_end);
  // Read master
  if ((error_code_master=gt_input_generic_parser_get_template(
      buffered_input_master,template_master,generic_parser_attr))==GT_IMP_FAIL) {
    gt_fatal_error_msg("Fatal error parsing file <<Master>>");
  }
  if (error_code_master==GT_IMP_EOF) {
    gt_input_generic_parser_attributes_delete(generic_parser_attr);
    return GT_IMP_EOF;
  }
  // Read slave
  if ((error_code_slave=gt_input_generic_parser_get_template(
      buffered_input_slave,template_slave,generic_parser_attr))==GT_IMP_FAIL) {
    gt_fatal_error_msg("Fatal error parsing file <<Slave>>");
  }
  if (error_code_slave==GT_IMP_EOF) { // Check EOF conditions
    gt_fatal_error_msg("<<Slave>> is not contained in master <<Master>> (looking for '"PRIgts"')",
        PRIgts_content(gt_template_get_string_tag(template_master)));
  }
  // Synch loop
  while (!(gt_streq(gt_template_get_tag(template_master),gt_template_get_tag(template_slave)) &&
      gt_template_get_pair(template_master)==gt_template_get_pair(template_slave))) {
    // Fetch next slave's template (this result was previously stored in the
    // master's status variable by mistake; behavior unchanged)
    if ((error_code_slave=gt_input_generic_parser_get_template(
        buffered_input_slave,template_slave,generic_parser_attr))!=GT_IMP_OK) {
      gt_fatal_error_msg("<<Slave>> is not contained in master <<Master>> (looking for '"PRIgts"')",
          PRIgts_content(gt_template_get_string_tag(template_master)));
    }
  }
  gt_input_generic_parser_attributes_delete(generic_parser_attr);
  return GT_IMP_OK;
}
void gt_mapset_perform_set_operations() {
// File IN/OUT
gt_input_file* input_file_1 = gt_input_file_open(parameters.name_input_file_1,parameters.mmap_input);
gt_input_file* input_file_2 = (parameters.name_input_file_2==NULL) ?
gt_input_stream_open(stdin) : gt_input_file_open(parameters.name_input_file_2,parameters.mmap_input);
if (parameters.name_input_file_2==NULL) GT_SWAP(input_file_1,input_file_2);
gt_output_file* output_file = (parameters.name_output_file==NULL) ?
gt_output_stream_new(stdout,SORTED_FILE) : gt_output_file_new(parameters.name_output_file,SORTED_FILE);
// Buffered I/O
gt_buffered_input_file* buffered_input_1 = gt_buffered_input_file_new(input_file_1);
gt_buffered_input_file* buffered_input_2 = gt_buffered_input_file_new(input_file_2);
gt_buffered_output_file* buffered_output = gt_buffered_output_file_new(output_file);
gt_buffered_input_file_attach_buffered_output(buffered_input_1,buffered_output);
// Template I/O (synch)
gt_template *template_1 = gt_template_new();
gt_template *template_2 = gt_template_new();
gt_output_map_attributes* output_attributes = gt_output_map_attributes_new();
while (gt_mapset_read_template_sync(buffered_input_1,buffered_input_2,
buffered_output,template_1,template_2,parameters.operation)) {
// Record current read length
current_read_length = gt_template_get_total_length(template_1);
// Apply operation
gt_template *ptemplate;
switch (parameters.operation) {
case GT_MAP_SET_UNION:
ptemplate=gt_template_union_template_mmaps_fx(gt_mapset_mmap_cmp,gt_mapset_map_cmp,template_1,template_2);
break;
case GT_MAP_SET_INTERSECTION:
ptemplate=gt_template_intersect_template_mmaps_fx(gt_mapset_mmap_cmp,gt_mapset_map_cmp,template_1,template_2);
break;
case GT_MAP_SET_DIFFERENCE:
ptemplate=gt_template_subtract_template_mmaps_fx(gt_mapset_mmap_cmp,gt_mapset_map_cmp,template_1,template_2);
break;
default:
gt_fatal_error(SELECTION_NOT_VALID);
break;
}
// Print template
gt_output_map_bofprint_template(buffered_output,ptemplate,output_attributes);
// Delete template
gt_template_delete(ptemplate);
}
// Clean
gt_template_delete(template_1);
gt_template_delete(template_2);
gt_buffered_input_file_close(buffered_input_1);
gt_buffered_input_file_close(buffered_input_2);
gt_buffered_output_file_close(buffered_output);
gt_input_file_close(input_file_1);
gt_input_file_close(input_file_2);
gt_output_file_close(output_file);
}
/*
 * Streams both inputs and, per matched template, emits either a JOIN record
 * (master tag + both counters + both map lists) or a COMPARE record
 * (master-only, slave-only and common maps with their counters).
 */
void gt_mapset_perform_cmp_operations() {
  // File IN/OUT
  gt_input_file* input_file_1 = gt_input_file_open(parameters.name_input_file_1,parameters.mmap_input);
  gt_input_file* input_file_2 = (parameters.name_input_file_2==NULL) ?
      gt_input_stream_open(stdin) : gt_input_file_open(parameters.name_input_file_2,parameters.mmap_input);
  if (parameters.name_input_file_2==NULL) GT_SWAP(input_file_1,input_file_2);
  gt_output_file* output_file = (parameters.name_output_file==NULL) ?
      gt_output_stream_new(stdout,SORTED_FILE) : gt_output_file_new(parameters.name_output_file,SORTED_FILE);
  // Buffered I/O
  gt_buffered_input_file* buffered_input_1 = gt_buffered_input_file_new(input_file_1);
  gt_buffered_input_file* buffered_input_2 = gt_buffered_input_file_new(input_file_2);
  gt_buffered_output_file* buffered_output = gt_buffered_output_file_new(output_file);
  gt_buffered_input_file_attach_buffered_output(buffered_input_1,buffered_output);
  // Template I/O (synch)
  gt_template *template_1 = gt_template_new();
  gt_template *template_2 = gt_template_new();
  gt_output_map_attributes* output_map_attributes = gt_output_map_attributes_new();
  while (gt_mapset_read_template_get_commom_map(buffered_input_1,buffered_input_2,template_1,template_2)) {
    // Record current read length (drives fractional eq_threshold)
    current_read_length = gt_template_get_total_length(template_1);
    // Apply operation
    switch (parameters.operation) {
      case GT_MAP_SET_JOIN:
        // Print Master's TAG+Counters+Maps
        gt_output_map_bofprint_tag(buffered_output,template_1->tag,template_1->attributes,output_map_attributes);
        gt_bofprintf(buffered_output,"\t");
        gt_output_map_bofprint_counters(buffered_output,gt_template_get_counters_vector(template_1),
            template_1->attributes,output_map_attributes); // Master's Counters
        gt_bofprintf(buffered_output,"\t");
        gt_output_map_bofprint_counters(buffered_output,gt_template_get_counters_vector(template_2),
            template_1->attributes,output_map_attributes); // Slave's Counters
        gt_bofprintf(buffered_output,"\t");
        gt_output_map_bofprint_template_maps(buffered_output,template_1,output_map_attributes); // Master's Maps
        gt_bofprintf(buffered_output,"\t");
        gt_output_map_bofprint_template_maps(buffered_output,template_2,output_map_attributes); // Slave's Maps
        gt_bofprintf(buffered_output,"\n");
        break;
      case GT_MAP_SET_COMPARE: {
        // Perform simple cmp operations
        gt_template *template_master_minus_slave=gt_template_subtract_template_mmaps_fx(gt_mapset_mmap_cmp,gt_mapset_map_cmp,template_1,template_2);
        gt_template *template_slave_minus_master=gt_template_subtract_template_mmaps_fx(gt_mapset_mmap_cmp,gt_mapset_map_cmp,template_2,template_1);
        gt_template *template_intersection=gt_template_intersect_template_mmaps_fx(gt_mapset_mmap_cmp,gt_mapset_map_cmp,template_1,template_2);
        /*
         * Print results :: (TAG (Master-Slave){COUNTER MAPS} (Slave-Master){COUNTER MAPS} (Intersection){COUNTER MAPS})
         */
        gt_output_map_bofprint_tag(buffered_output,template_1->tag,template_1->attributes,output_map_attributes);
        // Counters
        gt_bofprintf(buffered_output,"\t");
        gt_output_map_bofprint_counters(buffered_output,gt_template_get_counters_vector(template_master_minus_slave),
            template_master_minus_slave->attributes,output_map_attributes); // (Master-Slave){COUNTER}
        gt_bofprintf(buffered_output,"\t");
        gt_output_map_bofprint_counters(buffered_output,gt_template_get_counters_vector(template_slave_minus_master),
            template_slave_minus_master->attributes,output_map_attributes); // (Slave-Master){COUNTER}
        gt_bofprintf(buffered_output,"\t");
        gt_output_map_bofprint_counters(buffered_output,gt_template_get_counters_vector(template_intersection),
            template_intersection->attributes,output_map_attributes); // (Intersection){COUNTER}
        // Maps
        gt_bofprintf(buffered_output,"\t");
        gt_output_map_bofprint_template_maps(buffered_output,template_master_minus_slave,output_map_attributes); // (Master-Slave){COUNTER}
        gt_bofprintf(buffered_output,"\t");
        gt_output_map_bofprint_template_maps(buffered_output,template_slave_minus_master,output_map_attributes); // (Slave-Master){COUNTER}
        gt_bofprintf(buffered_output,"\t");
        gt_output_map_bofprint_template_maps(buffered_output,template_intersection,output_map_attributes); // (Intersection){COUNTER}
        gt_bofprintf(buffered_output,"\n");
        // Delete templates
        gt_template_delete(template_master_minus_slave);
        gt_template_delete(template_slave_minus_master);
        gt_template_delete(template_intersection);
      }
      break;
      default:
        gt_fatal_error(SELECTION_NOT_VALID);
        break;
    }
  }
  // Clean
  // BUGFIX: output_map_attributes was never freed
  gt_output_map_attributes_delete(output_map_attributes);
  gt_template_delete(template_1);
  gt_template_delete(template_2);
  gt_buffered_input_file_close(buffered_input_1);
  gt_buffered_input_file_close(buffered_input_2);
  gt_buffered_output_file_close(buffered_output);
  gt_input_file_close(input_file_1);
  gt_input_file_close(input_file_2);
  gt_output_file_close(output_file);
}
void gt_mapset_perform_merge_map() {
  /*
   * Merge two MAP files into a single output file.
   * Input 2 defaults to stdin when no second file name was given; in that
   * case the two handles are swapped so the stream-backed input is first.
   * The merge itself runs in an OpenMP parallel region (when available),
   * with a mutex serializing access to the shared inputs.
   */
  const bool second_is_stdin = (parameters.name_input_file_2==NULL);
  gt_input_file* primary_input = gt_input_file_open(parameters.name_input_file_1,parameters.mmap_input);
  gt_input_file* secondary_input;
  if (second_is_stdin) {
    secondary_input = gt_input_stream_open(stdin);
  } else {
    secondary_input = gt_input_file_open(parameters.name_input_file_2,parameters.mmap_input);
  }
  // Keep the stdin-backed handle as the first input (matches original GT_SWAP)
  if (second_is_stdin) GT_SWAP(primary_input,secondary_input);
  gt_output_file* merged_output;
  if (parameters.name_output_file==NULL) {
    merged_output = gt_output_stream_new(stdout,SORTED_FILE);
  } else {
    merged_output = gt_output_file_new(parameters.name_output_file,SORTED_FILE);
  }
  // Mutex guarding synchronized reading of the two inputs
  pthread_mutex_t input_mutex = PTHREAD_MUTEX_INITIALIZER;
  // Parallel reading+process
  #ifdef HAVE_OPENMP
  #pragma omp parallel num_threads(parameters.num_threads)
  #endif
  {
    if (parameters.files_contain_same_reads) {
      gt_merge_synch_map_files(&input_mutex,parameters.paired_end,merged_output,primary_input,secondary_input);
    } else {
      gt_merge_unsynch_map_files(&input_mutex,primary_input,secondary_input,parameters.paired_end,merged_output);
    }
  }
  // Clean
  gt_input_file_close(primary_input);
  gt_input_file_close(secondary_input);
  gt_output_file_close(merged_output);
}
void gt_mapset_display_compact_map() {
  /*
   * Print one compact summary line per alignment of each input template:
   * tag + read length, the counters vector, then a comma-separated list of
   * the sequence names hit by its maps.
   * Input defaults to stdin and output to stdout when no names were given.
   */
  gt_input_file* input_file;
  if (parameters.name_input_file_1==NULL) {
    input_file = gt_input_stream_open(stdin);
  } else {
    input_file = gt_input_file_open(parameters.name_input_file_1,parameters.mmap_input);
  }
  gt_output_file* output_file;
  if (parameters.name_output_file==NULL) {
    output_file = gt_output_stream_new(stdout,SORTED_FILE);
  } else {
    output_file = gt_output_file_new(parameters.name_output_file,SORTED_FILE);
  }
  #ifdef HAVE_OPENMP
  #pragma omp parallel num_threads(parameters.num_threads)
  #endif
  {
    // Per-thread output attributes, switched to compact mode
    gt_output_map_attributes* const output_map_attributes = gt_output_map_attributes_new();
    output_map_attributes->compact = true;
    GT_BEGIN_READING_WRITING_LOOP(input_file,output_file,parameters.paired_end,buffered_output,template) {
      GT_TEMPLATE_ITERATE_ALIGNMENT(template,alignment) {
        // Tag and read length
        gt_bofprintf(buffered_output,"End1::"PRIgts"[%"PRIu64"]\t",PRIgts_content(alignment->tag),gt_string_get_length(alignment->read));
        gt_output_map_bofprint_counters(buffered_output,alignment->counters,alignment->attributes,output_map_attributes);
        gt_bofprintf(buffered_output,"\t");
        // Comma-separated list of target sequence names
        uint64_t num_printed = 0;
        GT_ALIGNMENT_ITERATE(alignment,map) {
          if (num_printed++ > 0) gt_bofprintf(buffered_output,",");
          gt_bofprintf(buffered_output,PRIgts,PRIgts_content(map->seq_name));
        }
        gt_bofprintf(buffered_output,"\n");
      }
    } GT_END_READING_WRITING_LOOP(input_file,output_file,template);
    // Clean
    gt_output_map_attributes_delete(output_map_attributes);
  }
  // Clean
  gt_input_file_close(input_file);
  gt_output_file_close(output_file);
}
#define GT_MAPSET_OPERATIONS "union,intersection,difference,compare,join,merge-map,display-compact"
/*
 * Parse the mapset operation name into parameters.operation.
 * Accepts the UPPER/Capitalized/lower spellings of each operation in
 * GT_MAPSET_OPERATIONS. When there is no exact match, guesses the operation
 * from the first letter (printing an "Assuming ..." warning to stderr) and
 * aborts via gt_fatal_error_msg if even that fails.
 *
 * Fixes:
 *  - "INTERSECCTION" typo: the all-caps spelling never matched exactly and
 *    only worked through the first-letter fallback.
 *  - The 'Join' fallback tested for 'P'/'p'; it now tests 'J'/'j' to match
 *    the operation name (previously "J..." inputs fell through to the error).
 */
void gt_filter_parse_operation(char* const string_operation) {
  if (gt_streq(string_operation,"INTERSECTION") || gt_streq(string_operation,"Intersection") || gt_streq(string_operation,"intersection")) {
    parameters.operation = GT_MAP_SET_INTERSECTION;
  } else if (gt_streq(string_operation,"UNION") || gt_streq(string_operation,"Union") || gt_streq(string_operation,"union")) {
    parameters.operation = GT_MAP_SET_UNION;
  } else if (gt_streq(string_operation,"DIFFERENCE") || gt_streq(string_operation,"Difference") || gt_streq(string_operation,"difference")) {
    parameters.operation = GT_MAP_SET_DIFFERENCE;
  } else if (gt_streq(string_operation,"COMPARE") || gt_streq(string_operation,"Compare") || gt_streq(string_operation,"compare")) {
    parameters.operation = GT_MAP_SET_COMPARE;
  } else if (gt_streq(string_operation,"JOIN") || gt_streq(string_operation,"Join") || gt_streq(string_operation,"join")) {
    parameters.operation = GT_MAP_SET_JOIN;
  } else if (gt_streq(string_operation,"MERGE-MAP") || gt_streq(string_operation,"Merge-map") || gt_streq(string_operation,"merge-map")) {
    parameters.operation = GT_MERGE_MAP;
  } else if (gt_streq(string_operation,"DISPLAY-COMPACT") || gt_streq(string_operation,"Display-compact") || gt_streq(string_operation,"display-compact")) {
    parameters.operation = GT_DISPLAY_COMPACT_MAP;
  } else {
    // No exact match: guess from the first letter and warn.
    // Note: "display-compact" has no first-letter fallback ('D' is taken by
    // Difference), so it must be spelled out exactly.
    if (string_operation[0]=='I' || string_operation[0]=='i') {
      fprintf(stderr,"\tAssuming 'Intersection' ...\n");
      parameters.operation = GT_MAP_SET_INTERSECTION;
    } else if (string_operation[0]=='U' || string_operation[0]=='u') {
      fprintf(stderr,"\tAssuming 'Union' ...\n");
      parameters.operation = GT_MAP_SET_UNION;
    } else if (string_operation[0]=='D' || string_operation[0]=='d') {
      fprintf(stderr,"\tAssuming 'Difference' ...\n");
      parameters.operation = GT_MAP_SET_DIFFERENCE;
    } else if (string_operation[0]=='C' || string_operation[0]=='c') {
      fprintf(stderr,"\tAssuming 'Compare' ...\n");
      parameters.operation = GT_MAP_SET_COMPARE;
    } else if (string_operation[0]=='J' || string_operation[0]=='j') {
      fprintf(stderr,"\tAssuming 'Join' ...\n");
      parameters.operation = GT_MAP_SET_JOIN;
    } else if (string_operation[0]=='M' || string_operation[0]=='m') {
      fprintf(stderr,"\tAssuming 'Merge-map' ...\n");
      parameters.operation = GT_MERGE_MAP;
    } else {
      gt_fatal_error_msg("Unknown operation '%s' in {"GT_MAPSET_OPERATIONS"}",string_operation);
    }
  }
}
// Parse the gt.mapset command line into the global 'parameters' struct.
// Short options use their character code; long-only options use numeric ids
// (300/301/302 for I/O, 400/401 for the compare function). Exits the process
// on -h/-J (after printing the menu) and aborts on invalid options or missing
// required arguments.
void parse_arguments(int argc,char** argv) {
// Build getopt tables from the shared gt_mapset_options description
struct option* gt_mapset_getopt = gt_options_adaptor_getopt(gt_mapset_options);
gt_string* const gt_mapset_short_getopt = gt_options_adaptor_getopt_short(gt_mapset_options);
int option, option_index;
while (true) {
// Get option & Select case
if ((option=getopt_long(argc,argv,
gt_string_get_string(gt_mapset_short_getopt),gt_mapset_getopt,&option_index))==-1) break;
// c=getopt_long(argc,argv,"i:o:psht:v",long_options,&option_index);
switch (option) {
/* Operations */
case 'C': // operation name (union/intersection/...), parsed separately
gt_filter_parse_operation(optarg);
break;
/* I/O */
case 300: // input file 1 ("--i1", per the error message below)
parameters.name_input_file_1 = optarg;
break;
case 301: // input file 2
parameters.name_input_file_2 = optarg;
break;
case 'p': // paired-end input
parameters.paired_end = true;
break;
case 302: // mmap'd input: flag is set but the feature aborts as unimplemented
parameters.mmap_input = true;
gt_fatal_error(NOT_IMPLEMENTED);
break;
case 'o': // output file (stdout when unset)
parameters.name_output_file = optarg;
break;
/* Compare Function */
case 's': // files-with-same-reads
parameters.files_contain_same_reads = true;
break;
case 400: // eq-th
parameters.eq_threshold = atof(optarg);
break;
case 401: // strict
parameters.strict = true;
break;
/* Misc */
case 'v':
parameters.verbose = true;
break;
case 't':
#ifdef HAVE_OPENMP
// Thread count is only honored when built with OpenMP support
parameters.num_threads = atol(optarg);
#endif
break;
case 'h': // print usage menu and exit
fprintf(stderr, "USE: ./gt.mapset [OPERATION] [ARGS]...\n");
gt_options_fprint_menu(stderr,gt_mapset_options,gt_mapset_groups,false,false);
exit(1);
case 'J': // print option menu as JSON and exit
gt_options_fprint_json_menu(stderr,gt_mapset_options,gt_mapset_groups,true,false);
exit(1);
break; // unreachable: exit(1) above never returns
case '?':
default:
gt_fatal_error_msg("Option not recognized");
}
}
// Check parameters
if (parameters.operation==GT_MAP_SET_UNKNOWN) {
gt_fatal_error_msg("Please specify operation {"GT_MAPSET_OPERATIONS"}");
}
// display-compact is the only operation that can run purely from stdin
if (parameters.operation!=GT_DISPLAY_COMPACT_MAP && !parameters.name_input_file_1) {
gt_fatal_error_msg("Input file 1 required (--i1)\n");
}
// Free
// NOTE(review): gt_mapset_getopt (from gt_options_adaptor_getopt) is never
// released here -- confirm whether the adaptor allocates; harmless for a
// one-shot CLI, but it looks like a small leak.
gt_string_delete(gt_mapset_short_getopt);
}
int main(int argc,char** argv) {
  // Install the GT error/signal handlers
  gt_handle_error_signals();
  // Parse command-line options into the global 'parameters'
  parse_arguments(argc,argv);
  // Dispatch on the requested operation
  switch (parameters.operation) {
    case GT_MERGE_MAP:
      gt_mapset_perform_merge_map();
      break;
    case GT_DISPLAY_COMPACT_MAP:
      gt_mapset_display_compact_map();
      break;
    case GT_MAP_SET_INTERSECTION:
    case GT_MAP_SET_UNION:
    case GT_MAP_SET_DIFFERENCE:
      gt_mapset_perform_set_operations();
      break;
    default:
      // Remaining operations (compare/join) use the cmp path
      gt_mapset_perform_cmp_operations();
      break;
  }
  return 0;
}
|
GB_unop__identity_bool_uint8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_bool_uint8)
// op(A') function: GB (_unop_tran__identity_bool_uint8)
// C type: bool
// A type: uint8_t
// cast: bool cij = (bool) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
bool z = (bool) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
bool z = (bool) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the identity unary operator entry-wise,
// casting each uint8_t of A to the bool entries of C.
GrB_Info GB (_unop_apply__identity_bool_uint8)
(
    bool *Cx,                       // Cx and Ax may be aliased
    const uint8_t *Ax,
    const int8_t *restrict Ab,      // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // no bitmap: every one of the anz entries is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            Cx [p] = (bool) Ax [p] ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            Cx [p] = (bool) Ax [p] ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A while casting uint8_t->bool; the whole
// kernel lives in the included GB_unop_transpose.c template, driven by the
// GB_* macros defined above in this file.
GrB_Info GB (_unop_tran__identity_bool_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
// operator/type combination disabled at compile time (see GB_DISABLE)
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__rminus_uint64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rminus_uint64)
// A.*B function (eWiseMult): GB (_AemultB_08__rminus_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__rminus_uint64)
// A.*B function (eWiseMult): GB (_AemultB_04__rminus_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rminus_uint64)
// A*D function (colscale): GB (_AxD__rminus_uint64)
// D*A function (rowscale): GB (_DxB__rminus_uint64)
// C+=B function (dense accum): GB (_Cdense_accumB__rminus_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__rminus_uint64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rminus_uint64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rminus_uint64)
// C=scalar+B GB (_bind1st__rminus_uint64)
// C=scalar+B' GB (_bind1st_tran__rminus_uint64)
// C=A+scalar GB (_bind2nd__rminus_uint64)
// C=A'+scalar GB (_bind2nd_tran__rminus_uint64)
// C type: uint64_t
// A type: uint64_t
// A pattern? 0
// B type: uint64_t
// B pattern? 0
// BinaryOp: cij = (bij - aij)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (y - x) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RMINUS || GxB_NO_UINT64 || GxB_NO_RMINUS_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B with all three matrices dense; kernel in the included template.
// Note: unlike the other kernels in this file, there is no GB_DISABLE guard.
void GB (_Cdense_ewise3_accum__rminus_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with all three matrices dense (no accumulation); kernel in the
// included template, specialized via the GB_* macros above.
void GB (_Cdense_ewise3_noaccum__rminus_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate the sparse matrix B into the dense matrix C, using the
// rminus operator. B is pre-sliced into B_ntasks tasks (B_ek_slicing).
GrB_Info GB (_Cdense_accumB__rminus_uint64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into the dense matrix C with the rminus op.
// The kernel is in GB_dense_subassign_22_template.c, which reads 'bwork'.
// Fix: removed the duplicated, unreachable "return (GrB_SUCCESS)" that
// followed the inner block (the block always returns first).
GrB_Info GB (_Cdense_accumb__rminus_uint64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,     // points to the scalar b, as GB_void
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint64_t
        uint64_t bwork = (*((uint64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D; the kernel in
// the included template writes directly into C->x (uint64_t values).
GrB_Info GB (_AxD__rminus_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D; the kernel in the
// included template writes directly into C->x (uint64_t values).
GrB_Info GB (_DxB__rminus_uint64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B (optionally masked by M or !M), specialized for
// z = rminus(x,y) = y - x on uint64_t. The whole kernel is in the included
// GB_add_template.c; this wrapper declares the slicing workspaces and
// unpacks the eWiseUnion alpha/beta scalars.
GrB_Info GB (_AaddB__rminus_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// slicing workspaces, released via GB_FREE_WORKSPACE below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint64_t alpha_scalar ;
uint64_t beta_scalar ;
if (is_eWiseUnion)
{
// eWiseUnion: alpha/beta substitute for entries missing from A or B
alpha_scalar = (*((uint64_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint64_t *) beta_scalar_in )) ;
}
// NOTE(review): alpha_scalar/beta_scalar remain uninitialized when
// !is_eWiseUnion -- presumably the template only reads them in the
// eWiseUnion case; confirm against GB_add_template.c.
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is
// sparse/hyper; the kernel is in the included GB_emult_08_meta.c, driven by
// the GB_* macros above (rminus on uint64_t).
GrB_Info GB (_AemultB_08__rminus_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full. For rminus, GB_BINOP_FLIP is 0 (defined above), so only the
// non-flipped branch below is compiled in.
GrB_Info GB (_AemultB_02__rminus_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B where M is sparse/hyper and both A and
// B are bitmap/full; kernel in the included template.
GrB_Info GB (_AemultB_04__rminus_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap; kernel in
// the included template.
GrB_Info GB (_AemultB_bitmap__rminus_uint64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): bind the scalar x as the first operand, so each entry
// becomes z = rminus(x,bij) = bij - x.
GrB_Info GB (_bind1st__rminus_uint64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // B->b if B is bitmap
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *Cx = (uint64_t *) Cx_output ;
    const uint64_t x = (*((uint64_t *) x_input)) ;
    const uint64_t *Bx = (const uint64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries absent from a bitmap B
        if (!GBB (Bb, p)) continue ;
        Cx [p] = GBX (Bx, p, false) - x ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): bind the scalar y as the second operand, so each entry
// becomes z = rminus(aij,y) = y - aij.
GrB_Info GB (_bind2nd__rminus_uint64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *Cx = (uint64_t *) Cx_output ;
    const uint64_t *Ax = (const uint64_t *) Ax_input ;
    const uint64_t y = (*((uint64_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries absent from a bitmap A
        if (!GBB (Ab, p)) continue ;
        Cx [p] = y - GBX (Ax, p, false) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij - x) ; \
}
// C = op (x, A'): transpose A and apply z = rminus(x,aij) = aij - x, using
// the GB_CAST_OP macro defined just above and the GB_unop_transpose.c
// template.
GrB_Info GB (_bind1st_tran__rminus_uint64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for later code (a no-op here: both definitions are
// uint64_t in this generated instance)
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (y - aij) ; \
}
// C = op (A', y): transpose A and apply z = rminus(aij,y) = y - aij, using
// the GB_CAST_OP macro defined just above and the GB_unop_transpose.c
// template.
GrB_Info GB (_bind2nd_tran__rminus_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t y = (*((const uint64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
task_types.c | // RUN: %libomp-compile-and-run | FileCheck %s
// REQUIRES: ompt
#include "callback.h"
#include <omp.h>
#include <math.h>
// OMPT lit test: creates one task of each flavor (initial, implicit,
// explicit, undeferred, untied, final, nested final+undeferred) and checks,
// via the FileCheck directives below, that the runtime reports the expected
// task_type flags in ompt_callback_task_create / task-inquiry output.
// The CHECK/CHECK-SAME comment lines are functional FileCheck patterns and
// must not be altered.
int main() {
//initialize the OpenMP runtime
omp_get_num_threads();
// initial task
print_ids(0);
// NOTE(review): x is uninitialized and incremented from several tasks
// without synchronization -- it exists only to give each task a side
// effect; its value is never read.
int x;
// implicit task
#pragma omp parallel num_threads(1)
{
print_ids(0);
x++;
}
#pragma omp parallel num_threads(2)
{
// explicit task
#pragma omp single
#pragma omp task
{
print_ids(0);
x++;
}
// explicit task with undeferred
#pragma omp single
#pragma omp task if (0)
{
print_ids(0);
x++;
}
// explicit task with untied
#pragma omp single
#pragma omp task untied
{
// Output of thread_id is needed to know on which thread task is executed
printf("%" PRIu64 ": explicit_untied\n", ompt_get_thread_data()->value);
print_ids(0);
x++;
}
// explicit task with final
#pragma omp single
#pragma omp task final(1)
{
print_ids(0);
x++;
// nested explicit task with final and undeferred
#pragma omp task
{
print_ids(0);
x++;
}
}
// Mergeable task test deactivated for now
// explicit task with mergeable
/*
#pragma omp task mergeable if((int)sin(0))
{
print_ids(0);
x++;
}
*/
// TODO: merged task
}
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_create'
// CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]]
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_task_create: parent_task_id=0
// CHECK-SAME: parent_task_frame.exit=[[NULL]]
// CHECK-SAME: parent_task_frame.reenter=[[NULL]]
// CHECK-SAME: new_task_id=[[INITIAL_TASK_ID:[0-9]+]], codeptr_ra=[[NULL]]
// CHECK-SAME: task_type=ompt_task_initial=1, has_dependences=no
// CHECK-NOT: 0: parallel_data initially not null
// initial task
// CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id={{[0-9]+}}
// CHECK-SAME: task_id=[[INITIAL_TASK_ID]], exit_frame=[[NULL]]
// CHECK-SAME: reenter_frame=[[NULL]]
// CHECK-SAME: task_type=ompt_task_initial=1, thread_num=0
// implicit task
// CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id={{[0-9]+}}
// CHECK-SAME: task_id={{[0-9]+}}, exit_frame={{0x[0-f]+}}
// CHECK-SAME: reenter_frame=[[NULL]]
// CHECK-SAME: task_type=ompt_task_implicit|ompt_task_undeferred=134217730
// CHECK-SAME: thread_num=0
// explicit task
// CHECK: {{^[0-9]+}}: ompt_event_task_create: parent_task_id={{[0-9]+}}
// CHECK-SAME: parent_task_frame.exit={{0x[0-f]+}}
// CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}}
// CHECK-SAME: new_task_id=[[EXPLICIT_TASK_ID:[0-9]+]]
// CHECK-SAME: codeptr_ra={{0x[0-f]+}}
// CHECK-SAME: task_type=ompt_task_explicit=4
// CHECK-SAME: has_dependences=no
// CHECK: [[THREAD_ID_1:[0-9]+]]: ompt_event_task_schedule:
// CHECK-SAME: second_task_id=[[EXPLICIT_TASK_ID]]
// CHECK: [[THREAD_ID_1]]: task level 0: parallel_id=[[PARALLEL_ID:[0-9]+]]
// CHECK-SAME: task_id=[[EXPLICIT_TASK_ID]], exit_frame={{0x[0-f]+}}
// CHECK-SAME: reenter_frame=[[NULL]], task_type=ompt_task_explicit=4
// CHECK-SAME: thread_num={{[01]}}
// explicit task with undeferred
// CHECK: {{^[0-9]+}}: ompt_event_task_create: parent_task_id={{[0-9]+}}
// CHECK-SAME: parent_task_frame.exit={{0x[0-f]+}}
// CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}}
// CHECK-SAME: new_task_id=[[EXPLICIT_UNDEFERRED_TASK_ID:[0-9]+]]
// CHECK-SAME: codeptr_ra={{0x[0-f]+}}
// CHECK-SAME: task_type=ompt_task_explicit|ompt_task_undeferred=134217732
// CHECK-SAME: has_dependences=no
// CHECK: [[THREAD_ID_2:[0-9]+]]: ompt_event_task_schedule:
// CHECK-SAME: second_task_id=[[EXPLICIT_UNDEFERRED_TASK_ID]]
// CHECK: [[THREAD_ID_2]]: task level 0: parallel_id=[[PARALLEL_ID]]
// CHECK-SAME: task_id=[[EXPLICIT_UNDEFERRED_TASK_ID]]
// CHECK-SAME: exit_frame={{0x[0-f]+}}, reenter_frame=[[NULL]]
// CHECK-SAME: task_type=ompt_task_explicit|ompt_task_undeferred=134217732
// CHECK-SAME: thread_num={{[01]}}
// explicit task with untied
// CHECK: {{^[0-9]+}}: ompt_event_task_create: parent_task_id={{[0-9]+}}
// CHECK-SAME: parent_task_frame.exit={{0x[0-f]+}}
// CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}}
// CHECK-SAME: new_task_id=[[EXPLICIT_UNTIED_TASK_ID:[0-9]+]]
// CHECK-SAME: codeptr_ra={{0x[0-f]+}}
// CHECK-SAME: task_type=ompt_task_explicit|ompt_task_untied=268435460
// CHECK-SAME: has_dependences=no
// Here the thread_id cannot be taken from a schedule event as there
// may be multiple of those
// CHECK: [[THREAD_ID_3:[0-9]+]]: explicit_untied
// CHECK: [[THREAD_ID_3]]: task level 0: parallel_id=[[PARALLEL_ID]]
// CHECK-SAME: task_id=[[EXPLICIT_UNTIED_TASK_ID]], exit_frame={{[^\,]*}}
// CHECK-SAME: reenter_frame=[[NULL]]
// CHECK-SAME: task_type=ompt_task_explicit|ompt_task_untied=268435460
// CHECK-SAME: thread_num={{[01]}}
// explicit task with final
// CHECK: {{^[0-9]+}}: ompt_event_task_create: parent_task_id={{[0-9]+}}
// CHECK-SAME: parent_task_frame.exit={{0x[0-f]+}}
// CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}}
// CHECK-SAME: new_task_id=[[EXPLICIT_FINAL_TASK_ID:[0-9]+]]
// CHECK-SAME: codeptr_ra={{0x[0-f]+}}
// CHECK-SAME: task_type=ompt_task_explicit|ompt_task_final=536870916
// CHECK-SAME: has_dependences=no
// CHECK: [[THREAD_ID_4:[0-9]+]]: ompt_event_task_schedule:
// CHECK-SAME: second_task_id=[[EXPLICIT_FINAL_TASK_ID]]
// CHECK: [[THREAD_ID_4]]: task level 0: parallel_id=[[PARALLEL_ID]]
// CHECK-SAME: task_id=[[EXPLICIT_FINAL_TASK_ID]]
// CHECK-SAME: exit_frame={{0x[0-f]+}}, reenter_frame=[[NULL]]
// CHECK-SAME: task_type=ompt_task_explicit|ompt_task_final=536870916
// CHECK-SAME: thread_num={{[01]}}
// nested explicit task with final and undeferred
// CHECK: {{^[0-9]+}}: ompt_event_task_create: parent_task_id={{[0-9]+}}
// CHECK-SAME: parent_task_frame.exit={{0x[0-f]+}}
// CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}}
// CHECK-SAME: new_task_id=[[NESTED_FINAL_UNDEFERRED_TASK_ID:[0-9]+]]
// CHECK-SAME: codeptr_ra={{0x[0-f]+}}
// CHECK-SAME: task_type=ompt_task_explicit|ompt_task_undeferred
// CHECK-SAME:|ompt_task_final=671088644
// CHECK-SAME: has_dependences=no
// CHECK: [[THREAD_ID_5:[0-9]+]]: ompt_event_task_schedule:
// CHECK-SAME: second_task_id=[[NESTED_FINAL_UNDEFERRED_TASK_ID]]
// CHECK: [[THREAD_ID_5]]: task level 0: parallel_id=[[PARALLEL_ID]]
// CHECK-SAME: task_id=[[NESTED_FINAL_UNDEFERRED_TASK_ID]]
// CHECK-SAME: exit_frame={{0x[0-f]+}}, reenter_frame=[[NULL]]
// CHECK-SAME: task_type=ompt_task_explicit|ompt_task_undeferred
// CHECK-SAME:|ompt_task_final=671088644
// CHECK-SAME: thread_num={{[01]}}
return 0;
}
|
GB_binop__div_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__div_int32
// A.*B function (eWiseMult): GB_AemultB__div_int32
// A*D function (colscale): GB_AxD__div_int32
// D*A function (rowscale): GB_DxB__div_int32
// C+=B function (dense accum): GB_Cdense_accumB__div_int32
// C+=b function (dense accum): GB_Cdense_accumb__div_int32
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__div_int32
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__div_int32
// C=scalar+B GB_bind1st__div_int32
// C=scalar+B' GB_bind1st_tran__div_int32
// C=A+scalar GB_bind2nd__div_int32
// C=A'+scalar GB_bind2nd_tran__div_int32
// C type: int32_t
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = GB_IDIV_SIGNED (aij, bij, 32)
// Operator/type configuration consumed by the template files included below.
// Each template expands these macros to produce the int32 DIV kernels.
// type of the A matrix entries
#define GB_ATYPE \
int32_t
// type of the B matrix entries
#define GB_BTYPE \
int32_t
// type of the C matrix entries
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
// access the entry of C at position p
#define GB_CX(p) Cx [p]
// binary operator: signed integer division with GraphBLAS semantics
// (GB_IDIV_SIGNED presumably guards divide-by-zero/overflow -- defined in GB.h)
#define GB_BINOP(z, x, y, i, j) \
z = GB_IDIV_SIGNED (x, y, 32) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
// "(none)" is an intentionally invalid token; the templates only expand
// GB_CBLAS_AXPY when a CBLAS gateway exists, which it does not for int32 div
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
// (set via GraphBLAS compile-time controls in GB_control.h)
#define GB_DISABLE \
(GxB_NO_DIV || GxB_NO_INT32 || GxB_NO_DIV_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B, where C, A, and B are all dense; the loop body comes entirely
// from the included template, with GB_BINOP (signed int32 division) as the op.
// Note: no GB_DISABLE guard here, unlike the other kernels in this file.
void GB_Cdense_ewise3_accum__div_int32
(
GrB_Matrix C,           // input/output: dense, modified in place
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads      // number of OpenMP threads to use
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B, where C, A, and B are all dense; body supplied by the template.
// Returns GrB_NO_VALUE if this kernel was compiled out (GB_DISABLE), telling
// the caller to fall back to the generic method.
GrB_Info GB_Cdense_ewise3_noaccum__div_int32
(
GrB_Matrix C,           // output: dense, fully overwritten
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads      // number of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B, accumulating a sparse matrix B into a dense matrix C.
// The kfirst/klast/pstart slice arrays partition the entries of B across
// ntasks parallel tasks (computed by GB_ek_slice in the caller).
GrB_Info GB_Cdense_accumB__div_int32
(
GrB_Matrix C,                           // input/output: dense
const GrB_Matrix B,                     // sparse matrix accumulated into C
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b, accumulating a scalar b into a dense matrix C.
// p_bwork points to the scalar b; it is typecast to int32_t and the template
// presumably applies C(i,j) = GB_IDIV_SIGNED (C(i,j), b, 32) to every entry.
// Returns GrB_NO_VALUE if this kernel was compiled out (GB_DISABLE).
GrB_Info GB_Cdense_accumb__div_int32
(
GrB_Matrix C,           // input/output: dense, modified in place
const GB_void *p_bwork, // the scalar b, of type int32_t
const int nthreads      // number of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): a second, unreachable "return (GrB_SUCCESS)" that followed
// the block above in the generated code has been removed (dead code).
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, scaling the columns of A by the diagonal matrix D.
// The *_is_pattern flags indicate the values of that matrix are ignored
// (only the structure is used); the slice arrays partition A across tasks.
GrB_Info GB_AxD__div_int32
(
GrB_Matrix C,                           // output matrix
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,  // D is diagonal
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx is written directly by the included meta-kernel
int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, scaling the rows of B by the diagonal matrix D.
GrB_Info GB_DxB__div_int32
(
GrB_Matrix C,                           // output matrix
const GrB_Matrix D, bool D_is_pattern,  // D is diagonal
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx is written directly by the included meta-kernel
int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// GB_FREE_ALL releases the slice workspaces; it is invoked both on the
// success path below and on error paths inside GB_add_template.c, so the
// three slice pointer sets must be initialized to NULL before the include.
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd: C = A+B or C<M> = A+B, with the div operator applied where both
// A and B have an entry; the numeric work is done by GB_add_template.c.
GrB_Info GB_AaddB__div_int32
(
GrB_Matrix C,
const int C_sparsity,               // sparsity structure chosen for C
const GrB_Matrix M,                 // optional mask (may be NULL)
const bool Mask_struct,             // use only the structure of M
const bool Mask_comp,               // use the complement of M
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace freed by GB_FREE_ALL (here or inside the template on error)
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B, applying div only where A and B both
// have an entry; the numeric work is done by GB_emult_template.c.
GrB_Info GB_AemultB__div_int32
(
GrB_Matrix C,
const int C_sparsity,               // sparsity structure chosen for C
const GrB_Matrix M,                 // optional mask (may be NULL)
const bool Mask_struct,             // use only the structure of M
const bool Mask_comp,               // use the complement of M
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace freed by GB_FREE_ALL (here or inside the template on error)
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x / Bx [p] for every entry p present in B: the scalar x is bound
// as the first operand of the div operator.  Cx and Bx may be aliased.
GrB_Info GB_bind1st__div_int32
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// reinterpret the untyped inputs as int32_t
const int32_t x = (*((int32_t *) x_input)) ;
int32_t *Bx = (int32_t *) Bx_input ;
int32_t *Cx = (int32_t *) Cx_output ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
    // only operate on entries present in the bitmap of B
    if (GBB (Bb, k))
    {
        int32_t bij = Bx [k] ;
        Cx [k] = GB_IDIV_SIGNED (x, bij, 32) ;
    }
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] / y for every entry p present in A: the scalar y is bound
// as the second operand of the div operator.  Cx and Ax may be aliased.
GrB_Info GB_bind2nd__div_int32
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// reinterpret the untyped inputs as int32_t
const int32_t y = (*((int32_t *) y_input)) ;
int32_t *Ax = (int32_t *) Ax_input ;
int32_t *Cx = (int32_t *) Cx_output ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
    // only operate on entries present in the bitmap of A
    if (GBB (Ab, k))
    {
        int32_t aij = Ax [k] ;
        Cx [k] = GB_IDIV_SIGNED (aij, y, 32) ;
    }
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is the per-entry kernel used by GB_unop_transpose.c:
// cij = x / aij (no typecast, in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = GB_IDIV_SIGNED (x, aij, 32) ; \
}
// C = op (x, A'): transpose A and apply div with scalar x bound first.
GrB_Info GB_bind1st_tran__div_int32
(
GrB_Matrix C,
const GB_void *x_input,             // the scalar x, of type int32_t
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,   // one workspace per parallel task
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows (preprocessor-time, so this
// executes regardless of the return statements above)
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is the per-entry kernel used by GB_unop_transpose.c:
// cij = aij / y (no typecast, in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = GB_IDIV_SIGNED (aij, y, 32) ; \
}
// C = op (A', y): transpose A and apply div with scalar y bound second.
GrB_Info GB_bind2nd_tran__div_int32
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,             // the scalar y, of type int32_t
int64_t *GB_RESTRICT *Workspaces,   // one workspace per parallel task
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
vector_tendencies_expl.c | /*
This source file is part of the Geophysical Fluids Modeling Framework (GAME), which is released under the MIT license.
Github repository: https://github.com/OpenNWP/GAME
*/
/*
In this file, the calculation of the explicit part of the momentum equation is managed.
*/
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "../game_types.h"
#include "../spatial_operators/spatial_operators.h"
#include "../thermodynamics/thermodynamics.h"
/*
Computes the explicit part of the momentum (wind) tendency and stores it in
state_tendency -> wind.  Steps: (1) momentum advection (generalized Coriolis
term and the kinetic-energy gradient of the Lamb transformation), only at the
first RK step or at the very first model step; (2) momentum diffusion,
boundary-layer friction and dissipative heating, only when slow_update_bool
is set; (3) weighted summation of all explicit forces into the tendency.
Returns 0 on success.
- slow_update_bool: 1 if the slowly-varying (diffusive) terms are updated
- no_rk_step: index of the current Runge-Kutta step
- delta_t: model time step in seconds
*/
int vector_tendencies_expl(State *state, State *state_tendency, Grid *grid, Dualgrid *dualgrid, Diagnostics *diagnostics, Forcings *forcings, Irreversible_quantities *irrev, Config *config, int slow_update_bool, int no_rk_step, double delta_t)
{
// momentum advection
if (no_rk_step == 1 || config -> totally_first_step_bool == 1)
{
// Here, the gaseous flux density is prepared for the generalized Coriolis term.
#pragma omp parallel for
for (int i = 0; i < NO_OF_SCALARS; ++i)
{
if (config -> assume_lte == 0)
{
diagnostics -> scalar_field_placeholder[i] = density_gas(state, i);
}
else
{
// local thermodynamic equilibrium: use the density of the dry-air
// constituent directly
diagnostics -> scalar_field_placeholder[i] = state -> rho[NO_OF_CONDENSED_CONSTITUENTS*NO_OF_SCALARS + i];
}
}
scalar_times_vector(diagnostics -> scalar_field_placeholder, state -> wind, diagnostics -> flux_density, grid);
// Now, the "potential vorticity" is evaluated.
calc_pot_vort(state -> wind, diagnostics -> scalar_field_placeholder, diagnostics, grid, dualgrid);
// Now, the generalized Coriolis term is evaluated.
vorticity_flux(diagnostics -> flux_density, diagnostics -> pot_vort, forcings -> pot_vort_tend, grid, dualgrid);
// Kinetic energy is prepared for the gradient term of the Lamb transformation.
inner_product(state -> wind, state -> wind, diagnostics -> v_squared, grid);
// Taking the gradient of the kinetic energy
grad(diagnostics -> v_squared, forcings -> v_squared_grad, grid);
}
// momentum diffusion and dissipation (only updated at the first RK step and if advection is updated as well)
if (slow_update_bool == 1)
{
// horizontal momentum diffusion
if (config -> momentum_diff_h == 1)
{
hori_momentum_diffusion(state, diagnostics, irrev, config, grid, dualgrid);
}
// vertical momentum diffusion
if (config -> momentum_diff_v == 1)
{
vert_momentum_diffusion(state, diagnostics, irrev, grid, config, config -> slow_fast_ratio*delta_t);
}
// This is the explicit friction ansatz in the boundary layer from the Held-Suarez (1994) test case.
if (config -> explicit_boundary_layer == 1)
{
// some parameters
double bndr_lr_height = 1100.0; // boundary layer height
double bndr_lr_visc_sfc_land = 1.2/86400.0; // maximum friction coefficient in the boundary layer over land
double bndr_lr_visc_sfc_water = 0.8/86400.0; // maximum friction coefficient in the boundary layer over water
double bndr_lr_visc_sfc;
double e_folding_height = bndr_lr_height/M_PI;
double z_agl; // height above ground level of the current edge
int layer_index, h_index, vector_index;
#pragma omp parallel for private(layer_index, h_index, vector_index, z_agl, bndr_lr_visc_sfc)
for (int i = 0; i < NO_OF_H_VECTORS; ++i)
{
// decompose the horizontal-vector index into layer and edge indices
layer_index = i/NO_OF_VECTORS_H;
h_index = i - layer_index*NO_OF_VECTORS_H;
vector_index = NO_OF_SCALARS_H + layer_index*NO_OF_VECTORS_PER_LAYER + h_index;
// height above ground level
// (surface height at the edge is approximated by the mean of the
// surface heights of the two adjacent cell centers)
z_agl = grid -> z_vector[vector_index]
- 0.5*(grid -> z_vector[NO_OF_VECTORS - NO_OF_SCALARS_H + grid -> from_index[h_index]]
+ grid -> z_vector[NO_OF_VECTORS - NO_OF_SCALARS_H + grid -> to_index[h_index]]);
// the land value is used if at least one of the two adjacent cells is land
bndr_lr_visc_sfc = bndr_lr_visc_sfc_water;
if (grid -> is_land[grid -> from_index[h_index]] + grid -> is_land[grid -> to_index[h_index]] >= 1)
{
bndr_lr_visc_sfc = bndr_lr_visc_sfc_land;
}
// adding the boundary layer friction
if (z_agl < bndr_lr_height)
{
// friction decays exponentially with height, normalized so that it
// is bndr_lr_visc_sfc at the surface and zero at bndr_lr_height
irrev -> friction_acc[vector_index]
+= -bndr_lr_visc_sfc*(exp(-z_agl/e_folding_height) - exp(-bndr_lr_height/e_folding_height))
/(1 - exp(-bndr_lr_height/e_folding_height))
*state -> wind[vector_index];
}
}
}
// calculation of the dissipative heating rate
if (config -> momentum_diff_h == 1 || config -> momentum_diff_v == 1 || config -> explicit_boundary_layer == 1)
{
simple_dissipation_rate(state, irrev, grid);
}
// Due to condensates, the friction acceleration needs to get a deceleration factor.
if (config -> assume_lte == 0)
{
scalar_times_vector(irrev -> pressure_gradient_decel_factor, irrev -> friction_acc, irrev -> friction_acc, grid);
}
}
// Now the explicit forces are added up.
// new_weight blends the newly computed forces with the tendency of the
// previous RK step (0.5/0.5 averaging at the second RK step).
double old_weight, new_weight;
new_weight = 1;
if (no_rk_step == 1)
{
new_weight = 0.5;
}
old_weight = 1 - new_weight;
// the weights for the pressure gradient
double old_hor_pgrad_weight, current_hor_pgrad_weight, current_ver_pgrad_weight;
current_hor_pgrad_weight = 0.5 + config -> impl_thermo_weight;
old_hor_pgrad_weight = 1 - current_hor_pgrad_weight;
current_ver_pgrad_weight = 1 - config -> impl_thermo_weight;
int layer_index, h_index;
#pragma omp parallel for private(layer_index, h_index)
for (int i = 0; i < NO_OF_VECTORS; ++i)
{
layer_index = i/NO_OF_VECTORS_PER_LAYER;
h_index = i - layer_index*NO_OF_VECTORS_PER_LAYER;
// upper and lower boundary
// (no flux through the model top and the surface)
if (i < NO_OF_SCALARS_H || i >= NO_OF_VECTORS - NO_OF_SCALARS_H)
{
state_tendency -> wind[i] = 0;
}
// horizontal case
else if (h_index >= NO_OF_SCALARS_H
// checking for shading
&& NO_OF_LAYERS - 1 - layer_index >= grid -> no_of_shaded_points_vector[h_index - NO_OF_SCALARS_H])
{
state_tendency -> wind[i] =
old_weight*state_tendency -> wind[i] + new_weight*(
// explicit component of pressure gradient acceleration
// old time step component
old_hor_pgrad_weight*forcings -> pgrad_acc_old[i]
// current time step component
- current_hor_pgrad_weight*(forcings -> pressure_gradient_acc_neg_nl[i] + forcings -> pressure_gradient_acc_neg_l[i])
// generalized Coriolis term
+ forcings -> pot_vort_tend[i]
// kinetic energy term
- 0.5*forcings -> v_squared_grad[i]
// momentum diffusion
+ irrev -> friction_acc[i]);
}
// vertical case
else if (h_index < NO_OF_SCALARS_H
// checking for shading
&& NO_OF_LAYERS - layer_index > grid -> no_of_shaded_points_scalar[h_index])
{
state_tendency -> wind[i] =
old_weight*state_tendency -> wind[i] + new_weight*(
// explicit component of pressure gradient acceleration
// current time step component
-current_ver_pgrad_weight*(forcings -> pressure_gradient_acc_neg_nl[i] + forcings -> pressure_gradient_acc_neg_l[i])
// generalized Coriolis term
+ forcings -> pot_vort_tend[i]
// kinetic energy term
- 0.5*forcings -> v_squared_grad[i]
// momentum diffusion
+ irrev -> friction_acc[i]);
}
}
return 0;
}
|
GB_binop__atan2_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__atan2_fp64)
// A.*B function (eWiseMult): GB (_AemultB_08__atan2_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__atan2_fp64)
// A.*B function (eWiseMult): GB (_AemultB_04__atan2_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__atan2_fp64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__atan2_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__atan2_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__atan2_fp64)
// C=scalar+B GB (_bind1st__atan2_fp64)
// C=scalar+B' GB (_bind1st_tran__atan2_fp64)
// C=A+scalar GB (_bind2nd__atan2_fp64)
// C=A'+scalar GB (_bind2nd_tran__atan2_fp64)
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = atan2 (aij, bij)
// Operator/type configuration consumed by the template files included below.
// Each template expands these macros to produce the fp64 ATAN2 kernels.
// type of the A matrix entries
#define GB_ATYPE \
double
// type of the B matrix entries
#define GB_BTYPE \
double
// type of the C matrix entries
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
double aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
double bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
// access the entry of C at position p
#define GB_CX(p) Cx [p]
// binary operator: z = atan2 (x, y) from <math.h>
#define GB_BINOP(z,x,y,i,j) \
z = atan2 (x, y) ;
// true if the binop must be flipped
// (atan2 is not commutative and has no built-in flipped variant)
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
// (set via GraphBLAS compile-time controls in GB_control.h)
#define GB_DISABLE \
(GxB_NO_ATAN2 || GxB_NO_FP64 || GxB_NO_ATAN2_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// This kernel is compiled out (#if 0): the dense C += A+B variant only exists
// for MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, and RDIV, not for atan2.
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B (eWiseAdd with atan2), where C, A, and B are all dense; body
// supplied by the template.  Returns GrB_NO_VALUE if compiled out.
GrB_Info GB (_Cdense_ewise3_noaccum__atan2_fp64)
(
GrB_Matrix C,           // output: dense, fully overwritten
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads      // number of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B, accumulating a sparse matrix B into a dense matrix C.
// B_ek_slicing partitions the entries of B across B_ntasks parallel tasks.
GrB_Info GB (_Cdense_accumB__atan2_fp64)
(
GrB_Matrix C,           // input/output: dense
const GrB_Matrix B,     // sparse matrix accumulated into C
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b, accumulating a scalar b into a dense matrix C.
// p_bwork points to the scalar b; it is typecast to double and the template
// presumably applies C(i,j) = atan2 (C(i,j), b) to every entry.
// Returns GrB_NO_VALUE if this kernel was compiled out (GB_DISABLE).
GrB_Info GB (_Cdense_accumb__atan2_fp64)
(
GrB_Matrix C,           // input/output: dense, modified in place
const GB_void *p_bwork, // the scalar b, of type double
const int nthreads      // number of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): a second, unreachable "return (GrB_SUCCESS)" that followed
// the block above in the generated code has been removed (dead code).
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// This kernel is compiled out (#if 0): no colscale variant is generated for
// the atan2 operator.
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// This kernel is compiled out (#if 0): no rowscale variant is generated for
// the atan2 operator.
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B, applying atan2 where both A and B
// have an entry; the numeric work is done by GB_add_template.c.
GrB_Info GB (_AaddB__atan2_fp64)
(
GrB_Matrix C,
const int C_sparsity,               // sparsity structure chosen for C
const GrB_Matrix M,                 // optional mask (may be NULL)
const bool Mask_struct,             // use only the structure of M
const bool Mask_comp,               // use the complement of M
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace freed by GB_FREE_WORK (here or inside the template on error)
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C=A.*B (optionally masked) where C is sparse/hyper;
// the numeric work is done by GB_emult_08_meta.c.
GrB_Info GB (_AemultB_08__atan2_fp64)
(
GrB_Matrix C,
const int C_sparsity,               // sparsity structure chosen for C
const int ewise_method,
const GrB_Matrix M,                 // optional mask (may be NULL)
const bool Mask_struct,             // use only the structure of M
const bool Mask_comp,               // use the complement of M
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  Since atan2 is not commutative (GB_BINOP_FLIP is 1), the
// flipxy flag selects between z = atan2(aij,bij) and z = atan2(bij,aij)
// by instantiating the template twice with different GB_FLIPPED settings.
GrB_Info GB (_AemultB_02__atan2_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,                 // optional mask (may be NULL)
const bool Mask_struct,             // use only the structure of M
const bool Mask_comp,               // use the complement of M
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,                  // if true, apply the operator flipped
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B where M is sparse/hyper and both A and B
// are bitmap/full; the numeric work is done by GB_emult_04_template.c.
GrB_Info GB (_AemultB_04__atan2_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,                 // mask, sparse or hypersparse
const bool Mask_struct,             // use only the structure of M
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B (optionally masked) where C is bitmap; the numeric work
// is done by GB_bitmap_emult_template.c.
GrB_Info GB (_AemultB_bitmap__atan2_fp64)
(
GrB_Matrix C,                       // output: bitmap
const int ewise_method,
const GrB_Matrix M,                 // optional mask (may be NULL)
const bool Mask_struct,             // use only the structure of M
const bool Mask_comp,               // use the complement of M
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = atan2 (x, Bx [p]) for every entry p present in B: the scalar x
// is bound as the first operand.  Cx and Bx may be aliased.
GrB_Info GB (_bind1st__atan2_fp64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// reinterpret the untyped inputs as double
const double x = (*((double *) x_input)) ;
double *Bx = (double *) Bx_input ;
double *Cx = (double *) Cx_output ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < bnz ; k++)
{
    // only operate on entries present in the bitmap of B
    if (GBB (Bb, k))
    {
        double bij = GBX (Bx, k, false) ;
        Cx [k] = atan2 (x, bij) ;
    }
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = atan2 (Ax [p], y) for every entry p present in A: the scalar y
// is bound as the second operand.  Cx and Ax may be aliased.
GrB_Info GB (_bind2nd__atan2_fp64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// reinterpret the untyped inputs as double
const double y = (*((double *) y_input)) ;
double *Ax = (double *) Ax_input ;
double *Cx = (double *) Cx_output ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
    // only operate on entries present in the bitmap of A
    if (GBB (Ab, k))
    {
        double aij = GBX (Ax, k, false) ;
        Cx [k] = atan2 (aij, y) ;
    }
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is the per-entry kernel used by GB_unop_transpose.c:
// cij = atan2 (x, aij) (no typecast, in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = atan2 (x, aij) ; \
}
// C = op (x, A'): transpose A and apply atan2 with scalar x bound first.
GrB_Info GB (_bind1st_tran__atan2_fp64)
(
GrB_Matrix C,
const GB_void *x_input,             // the scalar x, of type double
const GrB_Matrix A,
int64_t *restrict *Workspaces,      // one workspace per parallel task
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows (preprocessor-time, so this
// executes regardless of the return statements above)
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is the per-entry kernel used by GB_unop_transpose.c:
// cij = atan2 (aij, y) (no typecast, in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = atan2 (aij, y) ; \
}
// C = op (A', y): transpose A and apply atan2 with scalar y bound second.
GrB_Info GB (_bind2nd_tran__atan2_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,             // the scalar y, of type double
int64_t *restrict *Workspaces,      // one workspace per parallel task
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
wshfl.c | /* Copyright 2018-2019. Massachusetts Institute of Technology.
* All rights reserved. Use of this source code is governed by
* a BSD-style license which can be found in the LICENSE file.
*
* Authors:
* 2018-2019 Siddharth Iyer <ssi@mit.edu>
*
* Tamir J, Uecker M, Chen W, Lai P, Alley MT, Vasanawala SS, Lustig M.
* T2 shuffling: Sharp, multicontrast, volumetric fast spin‐echo imaging.
* Magnetic resonance in medicine. 2017 Jan 1;77(1):180-95.
*
* B Bilgic, BA Gagoski, SF Cauley, AP Fan, JR Polimeni, PE Grant,
* LL Wald, and K Setsompop, Wave-CAIPI for highly accelerated 3D
* imaging. Magn Reson Med (2014) doi: 10.1002/mrm.25347
*
* Iyer S, Bilgic B, Setsompop K.
* Faster T2 shuffling with Wave.
* Presented in the session: "Signal Encoding and Decoding" at ISMRM 2018.
* https://www.ismrm.org/18/program_files/O67.htm
*/
#include <stdbool.h>
#include <complex.h>
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "num/multind.h"
#include "num/flpmath.h"
#include "num/fft.h"
#include "num/init.h"
#include "num/iovec.h"
#include "num/ops.h"
#include "num/ops_p.h"
#ifdef USE_CUDA
#include "num/gpuops.h"
#endif
#include "iter/iter.h"
#include "iter/lsqr.h"
#include "iter/misc.h"
#include "linops/linop.h"
#include "linops/fmac.h"
#include "linops/someops.h"
#include "linops/decompose_complex.h"
#include "misc/debug.h"
#include "misc/mri.h"
#include "misc/utils.h"
#include "misc/mmio.h"
#include "misc/misc.h"
#include "misc/opts.h"
#include "wavelet/wavthresh.h"
#include "lowrank/lrthresh.h"
// Command-line usage string and detailed help text (conventions, input
// descriptions, and expected array dimensions) passed to cmdline().
static const char usage_str[] = "<maps> <wave> <phi> <reorder> <table> <output>";
static const char help_str[] =
"Perform a wave-shuffling reconstruction.\n\n"
"Conventions:\n"
" * (sx, sy, sz) - Spatial dimensions.\n"
" * wx - Extended FOV in READ_DIM due to\n"
" wave's voxel spreading.\n"
" * (nc, md) - Number of channels and ESPIRiT's \n"
" extended-SENSE model operator\n"
" dimensions (or # of maps).\n"
" * (tf, tk) - Turbo-factor and the rank\n"
" of the temporal basis used in\n"
" shuffling.\n"
" * ntr - Number of TRs, or the number of\n"
" (ky, kz) points acquired of one\n"
" echo image.\n"
" * n - Total number of (ky, kz) points\n"
" acquired. This is equal to the\n"
" product of ntr and tf.\n\n"
"Descriptions:\n"
" * reorder is an (n by 3) index matrix such that\n"
" [ky, kz, t] = reorder(i, :) represents the\n"
" (ky, kz) kspace position of the readout line\n"
" acquired at echo number (t), and 0 <= ky < sy,\n"
" 0 <= kz < sz, 0 <= t < tf).\n"
" * table is a (wx by nc by n) matrix such that\n"
" table(:, :, k) represents the kth multichannel\n"
" kspace line.\n\n"
"Expected dimensions:\n"
" * maps - ( sx, sy, sz, nc, md, 1, 1)\n"
" * wave - ( wx, sy, sz, 1, 1, 1, 1)\n"
" * phi - ( 1, 1, 1, 1, 1, tf, tk)\n"
" * output - ( sx, sy, sz, 1, md, 1, tk)\n"
" * reorder - ( n, 3, 1, 1, 1, 1, 1)\n"
" * table - ( wx, nc, n, 1, 1, 1, 1)";
/* Helper function to print out operator dimensions (domain, then codomain). */
static void print_opdims(const struct linop_s* op)
{
	const struct iovec_s* spaces[2] = { linop_domain(op), linop_codomain(op) };
	const char* labels[2] = { "\tDomain: [", "\tCodomain: [" };

	for (int j = 0; j < 2; j++) {

		debug_printf(DP_INFO, "%s", labels[j]);

		for (long k = 0; k < spaces[j]->N; k++)
			debug_printf(DP_INFO, "%6ld", spaces[j]->dims[k]);

		debug_printf(DP_INFO, "]\n");
	}
}
/* Construct sampling mask array from reorder tables.
 * Sets mask(ky, kz, echo) = 1 for every acquired line listed in reorder. */
static void construct_mask(
	long reorder_dims[DIMS], complex float* reorder,
	long mask_dims[DIMS], complex float* mask)
{
	long num_lines = reorder_dims[0];
	long sy = mask_dims[1];
	long sz = mask_dims[2];

	for (long i = 0; i < num_lines; i++) {

		// Columns of the reorder table: ky, kz, echo index.
		long ky = lround(creal(reorder[i]));
		long kz = lround(creal(reorder[i + num_lines]));
		long te = lround(creal(reorder[i + 2 * num_lines]));

		mask[ky + sy * (kz + sz * te)] = 1;
	}
}
/* Operator data for the wave-shuffling kernel K (see linop_kern_create). */
struct kern_s {
INTERFACE(linop_data_t);
unsigned int N; // NOTE(review): never initialized or read in this file -- possibly vestigial.
long* reorder_dims; // Dimension of the index table: ( n, 3, 1, 1, 1, 1, 1, 1)
long* phi_dims; // Dimension of the temporal basis: ( 1, 1, 1, 1, 1, tf, tk, 1)
long* table_dims; // Dimension of the data table: (wx, nc, n, 1, 1, 1, 1, 1)
long* kernel_dims; // Dimension of the kernel: ( 1, sy, sz, 1, 1, 1, tk, tk)
complex float* reorder; // (ky, kz, echo) per acquired line; referenced, not owned.
complex float* phi; // Temporal basis; referenced, not owned.
complex float* kernel; // Precomputed sampling-temporal kernel (construct_kernel).
complex float* gpu_kernel; // Kernel replicated over wx/nc on the GPU; NULL if unused.
};
static DEF_TYPEID(kern_s);
/* Go to table from coefficient-kspace with memory efficiency. */
/* Forward kernel K: coefficient k-space (wx, sy, sz, nc, dim-6 tk) ->
 * data table (wx, nc, n). For each acquired line i, the reorder table gives
 * its (ky, kz) location and echo index t; the tk coefficients at that
 * location are expanded through the temporal basis phi and the row for the
 * acquired echo t is copied to the table. */
static void kern_apply(const linop_data_t* _data, complex float* dst, const complex float* src)
{
const struct kern_s* data = CAST_DOWN(kern_s, _data);
long wx = data->table_dims[0];
long sy = data->kernel_dims[1];
long sz = data->kernel_dims[2];
long nc = data->table_dims[1];
long n = data->reorder_dims[0];
long tf = data->phi_dims[5];
long tk = data->phi_dims[6];
long input_dims[] = { [0 ... DIMS - 1] = 1 };
input_dims[0] = wx;
input_dims[1] = sy;
input_dims[2] = sz;
input_dims[3] = nc;
input_dims[6] = tk;
// Permute so each (y, z) location owns one contiguous (wx, nc, tk) chunk,
// enabling the fixed-offset pointer arithmetic inside the loop below.
long perm_dims[] = { [0 ... DIMS - 1] = 1 };
perm_dims[0] = wx;
perm_dims[1] = nc;
perm_dims[3] = tk;
perm_dims[4] = sy;
perm_dims[5] = sz;
complex float* perm = md_alloc_sameplace(DIMS, perm_dims, CFL_SIZE, src);
unsigned int permute_order[DIMS] = {0, 3, 5, 6, 1, 2, 4, 7};
for (unsigned int i = 8; i < DIMS; i++)
permute_order[i] = i;
md_permute(DIMS, permute_order, perm_dims, perm, input_dims, src, CFL_SIZE);
// 4D views used for the per-line basis expansion: vec = chunk x phi.
long vec_dims[] = {wx, nc, tf, 1};
long phi_mat_dims[] = { 1, 1, tf, tk};
long phi_in_dims[] = {wx, nc, 1, tk};
long fmac_dims[] = {wx, nc, tf, tk};
long line_dims[] = {wx, nc, 1, 1};
complex float* vec = md_alloc_sameplace(4, vec_dims, CFL_SIZE, src);
long vec_str[4];
md_calc_strides(4, vec_str, vec_dims, CFL_SIZE);
long phi_mat_str[4];
md_calc_strides(4, phi_mat_str, phi_mat_dims, CFL_SIZE);
long phi_in_str[4];
md_calc_strides(4, phi_in_str, phi_in_dims, CFL_SIZE);
long fmac_str[4];
md_calc_strides(4, fmac_str, fmac_dims, CFL_SIZE);
int y = -1;
int z = -1;
int t = -1;
for (int i = 0; i < n; i ++) {
// Line i was acquired at (ky, kz) = (y, z) during echo t.
y = lround(creal(data->reorder[i]));
z = lround(creal(data->reorder[i + n]));
t = lround(creal(data->reorder[i + 2 * n]));
// Expand the tk coefficients at (y, z) over all tf echoes...
md_clear(4, vec_dims, vec, CFL_SIZE);
md_zfmac2(4, fmac_dims, vec_str, vec, phi_in_str, (perm + ((wx * nc * tk) * (y + z * sy))), phi_mat_str, data->phi);
// ...and keep only the echo that was actually acquired.
md_copy(4, line_dims, dst + (i * wx * nc), vec + (t * wx * nc), CFL_SIZE);
}
md_free(perm);
md_free(vec);
}
/* Collapse data table into the temporal basis for memory efficiency. */
/* Adjoint kernel K^H: data table (wx, nc, n) -> coefficient k-space
 * (wx, sy, sz, nc, dim-6 tk). All lines sharing the same (ky, kz) location
 * are gathered into one (wx, nc, tf) echo stack (flags[] marks lines already
 * consumed), which is then projected onto the temporal basis with a
 * conjugate multiply-accumulate and stored at that location. */
static void kern_adjoint(const linop_data_t* _data, complex float* dst, const complex float* src)
{
	struct kern_s* data = CAST_DOWN(kern_s, _data);

	long wx = data->table_dims[0];
	long sy = data->kernel_dims[1];
	long sz = data->kernel_dims[2];
	long nc = data->table_dims[1];
	long n = data->reorder_dims[0];
	long tf = data->phi_dims[5];
	long tk = data->phi_dims[6];

	// Accumulator in permuted layout: one contiguous (wx, nc, tk) chunk per (y, z).
	long perm_dims[] = { [0 ... DIMS - 1] = 1 };
	perm_dims[0] = wx;
	perm_dims[1] = nc;
	perm_dims[3] = tk;
	perm_dims[4] = sy;
	perm_dims[5] = sz;

	complex float* perm = md_alloc_sameplace(DIMS, perm_dims, CFL_SIZE, dst);
	md_clear(DIMS, perm_dims, perm, CFL_SIZE);

#ifdef _OPENMP
	long num_threads = omp_get_max_threads();
#else
	long num_threads = 1;
#endif

	long vec_dims[] = {wx, nc, tf, 1};
	long phi_mat_dims[] = { 1, 1, tf, tk};
	long phi_out_dims[] = {wx, nc, 1, tk};
	long fmac_dims[] = {wx, nc, tf, tk};
	long line_dims[] = {wx, nc, 1, 1};

	// One (wx, nc, tf) scratch stack per OpenMP thread.
	long vthrd_dims[] = {wx, nc, tf, 1, num_threads};

	complex float* vec = md_alloc_sameplace(5, vthrd_dims, CFL_SIZE, dst);

	// FIX: vthrd_dims has exactly 5 entries; the previous md_clear(DIMS, ...)
	// read DIMS entries from this 5-element array (out-of-bounds read, and a
	// potentially wrong clear size). The allocation above correctly uses 5.
	md_clear(5, vthrd_dims, vec, CFL_SIZE);

	long vec_str[4];
	md_calc_strides(4, vec_str, vec_dims, CFL_SIZE);
	long phi_mat_str[4];
	md_calc_strides(4, phi_mat_str, phi_mat_dims, CFL_SIZE);
	long phi_out_str[4];
	md_calc_strides(4, phi_out_str, phi_out_dims, CFL_SIZE);
	long fmac_str[4];
	md_calc_strides(4, fmac_str, fmac_dims, CFL_SIZE);

	long flag_dims[1] = { n };
	complex float* flags = md_calloc(1, flag_dims, CFL_SIZE);

	// NOTE(review): flags[] is read and written by multiple OpenMP threads
	// without synchronization. If two threads start lines with the same
	// (ky, kz) before either sets the flag, that location could be
	// accumulated twice -- presumably rare/accepted here; verify.
#pragma omp parallel for
	for (int k = 0; k < n; k ++) {
#ifdef _OPENMP
		int tid = omp_get_thread_num();
#else
		int tid = 0;
#endif
		int y = lround(creal(data->reorder[k]));
		int z = lround(creal(data->reorder[k + n]));
		int t = -1;

		if (0 == flags[k]) {

			md_clear(4, vec_dims, vec + (wx * nc * tf * tid), CFL_SIZE);

			// Gather every line acquired at this (y, z) into the echo stack.
			for (int i = k; i < n; i ++) {
				if ((y == lround(creal(data->reorder[i]))) && (z == lround(creal(data->reorder[i + n])))) {
					flags[i] = 1;
					t = lround(creal(data->reorder[i + 2 * n]));
					md_copy(4, line_dims, (vec + (wx * nc * tf * tid) + t * wx * nc), (src + i * wx * nc), CFL_SIZE);
				}
			}

			// Project the echo stack onto the temporal basis (conjugate MAC).
			md_zfmacc2(4, fmac_dims, phi_out_str, perm + (y + z * sy) * (wx * nc * tk), vec_str, vec + (wx * nc * tf * tid), phi_mat_str, data->phi);
		}
	}

	// Permute the per-location chunks back to (wx, sy, sz, nc, dim-6 tk).
	long out_dims[] = { [0 ... DIMS - 1] = 1 };
	out_dims[0] = wx;
	out_dims[1] = sy;
	out_dims[2] = sz;
	out_dims[3] = nc;
	out_dims[6] = tk;

	unsigned int permute_order[DIMS] = {0, 4, 5, 1, 6, 2, 3, 7};
	for (unsigned int i = 8; i < DIMS; i++)
		permute_order[i] = i;

	md_permute(DIMS, permute_order, out_dims, dst, perm_dims, perm, CFL_SIZE);

	md_free(vec);
	md_free(perm);
	md_free(flags);
}
/* Normal operator K^H K: apply the precomputed (tk x tk) sampling-temporal
 * kernel pointwise over (y, z). Output is accumulated with the coefficient
 * index in dim 7 (dim 6 is singleton), which occupies the same memory
 * layout as the dim-6 input. */
static void kern_normal(const linop_data_t* _data, complex float* dst, const complex float* src)
{
const struct kern_s* data = CAST_DOWN(kern_s, _data);
long wx = data->table_dims[0];
long sy = data->kernel_dims[1];
long sz = data->kernel_dims[2];
long nc = data->table_dims[1];
long tk = data->phi_dims[6];
long input_dims[DIMS] = { [0 ... DIMS - 1] = 1 };
input_dims[0] = wx;
input_dims[1] = sy;
input_dims[2] = sz;
input_dims[3] = nc;
input_dims[6] = tk;
long input_str[DIMS];
md_calc_strides(DIMS, input_str, input_dims, CFL_SIZE);
long output_dims[DIMS];
md_copy_dims(DIMS, output_dims, input_dims);
output_dims[6] = 1;
output_dims[7] = tk;
long output_str[DIMS];
md_calc_strides(DIMS, output_str, output_dims, CFL_SIZE);
// The GPU copy of the kernel was materialized with explicit wx/nc
// replication in linop_kern_create, so its strides differ from the
// broadcast (size-1) dims of the CPU kernel.
long gpu_kernel_dims[DIMS] = { [0 ... DIMS - 1] = 1};
md_copy_dims(DIMS, gpu_kernel_dims, data->kernel_dims);
gpu_kernel_dims[0] = wx;
gpu_kernel_dims[3] = nc;
long kernel_str[DIMS];
md_calc_strides(DIMS, kernel_str, data->kernel_dims, CFL_SIZE);
long gpu_kernel_str[DIMS];
md_calc_strides(DIMS, gpu_kernel_str, gpu_kernel_dims, CFL_SIZE);
long fmac_dims[DIMS];
md_merge_dims(DIMS, fmac_dims, input_dims, data->kernel_dims);
// dst(..., 1, tk) += sum over dim-6 of src(..., tk, 1) * kernel(tk, tk).
md_clear(DIMS, output_dims, dst, CFL_SIZE);
#ifdef USE_CUDA
if(cuda_ondevice(src))
md_zfmac2(DIMS, fmac_dims, output_str, dst, input_str, src, gpu_kernel_str, data->gpu_kernel);
else
#endif
md_zfmac2(DIMS, fmac_dims, output_str, dst, input_str, src, kernel_str, data->kernel);
}
/* Release the kernel operator data (dimension arrays, the GPU copy of the
 * kernel if one was made, and the data struct itself). The reorder, phi,
 * and kernel arrays are only referenced and are not freed here. */
static void kern_free(const linop_data_t* _data)
{
	const struct kern_s* d = CAST_DOWN(kern_s, _data);

	xfree(d->reorder_dims);
	xfree(d->phi_dims);
	xfree(d->table_dims);
	xfree(d->kernel_dims);

#ifdef USE_CUDA
	if (NULL != d->gpu_kernel)
		md_free(d->gpu_kernel);
#endif

	xfree(d);
}
/* Create the wave-shuffling kernel operator K.
 * Domain: coefficient k-space (wx, sy, sz, nc, 1, 1, tk).
 * Codomain: data table (wx, nc, n).
 * Dimension arrays are copied; reorder/phi/kernel are referenced, not
 * copied, and must outlive the operator. If gpu_flag is set, a copy of the
 * kernel replicated over wx and nc is staged on the GPU for kern_normal. */
static const struct linop_s* linop_kern_create(bool gpu_flag,
const long _reorder_dims[DIMS], complex float* reorder,
const long _phi_dims[DIMS], complex float* phi,
const long _kernel_dims[DIMS], complex float* kernel,
const long _table_dims[DIMS])
{
PTR_ALLOC(struct kern_s, data);
SET_TYPEID(kern_s, data);
PTR_ALLOC(long[DIMS], reorder_dims);
PTR_ALLOC(long[DIMS], phi_dims);
PTR_ALLOC(long[DIMS], table_dims);
PTR_ALLOC(long[DIMS], kernel_dims);
md_copy_dims(DIMS, *reorder_dims, _reorder_dims);
md_copy_dims(DIMS, *phi_dims, _phi_dims);
md_copy_dims(DIMS, *table_dims, _table_dims);
md_copy_dims(DIMS, *kernel_dims, _kernel_dims);
data->reorder_dims = *PTR_PASS(reorder_dims);
data->phi_dims = *PTR_PASS(phi_dims);
data->table_dims = *PTR_PASS(table_dims);
data->kernel_dims = *PTR_PASS(kernel_dims);
data->reorder = reorder;
data->phi = phi;
data->kernel = kernel;
data->gpu_kernel = NULL;
#ifdef USE_CUDA
if(gpu_flag) {
// Materialize the kernel with explicit replication along wx (dim 0) and
// nc (dim 3) -- the copy below broadcasts the singleton dims -- and move
// the result to the GPU.
long repmat_kernel_dims[DIMS] = { [0 ... DIMS - 1] = 1};
md_copy_dims(DIMS, repmat_kernel_dims, _kernel_dims);
repmat_kernel_dims[0] = _table_dims[0];
repmat_kernel_dims[3] = _table_dims[1];
long kernel_strs[DIMS];
long repmat_kernel_strs[DIMS];
md_calc_strides(DIMS, kernel_strs, _kernel_dims, CFL_SIZE);
md_calc_strides(DIMS, repmat_kernel_strs, repmat_kernel_dims, CFL_SIZE);
complex float* repmat_kernel = md_calloc(DIMS, repmat_kernel_dims, CFL_SIZE);
md_copy2(DIMS, repmat_kernel_dims, repmat_kernel_strs, repmat_kernel, kernel_strs, kernel, CFL_SIZE);
data->gpu_kernel = md_gpu_move(DIMS, repmat_kernel_dims, repmat_kernel, CFL_SIZE);
md_free(repmat_kernel);
}
#else
UNUSED(gpu_flag);
#endif
// Operator domain (coefficient k-space) and codomain (data table).
long input_dims[DIMS] = { [0 ... DIMS - 1] = 1 };
input_dims[0] = _table_dims[0];
input_dims[1] = _kernel_dims[1];
input_dims[2] = _kernel_dims[2];
input_dims[3] = _table_dims[1];
input_dims[6] = _phi_dims[6];
long output_dims[DIMS] = { [0 ... DIMS - 1] = 1 };
output_dims[0] = _table_dims[0];
output_dims[1] = _table_dims[1];
output_dims[2] = _reorder_dims[0];
const struct linop_s* K = linop_create(DIMS, output_dims, DIMS, input_dims, CAST_UP(PTR_PASS(data)), kern_apply, kern_adjoint, kern_normal, NULL, kern_free);
return K;
}
/* Operator data: lifts the single-channel operator to nc coils / md maps. */
struct multc_s {
INTERFACE(linop_data_t);
unsigned int nc; // Number of receive channels (coils).
unsigned int md; // Number of ESPIRiT maps (extended-SENSE components).
const complex float* maps; // Sensitivity maps (sx, sy, sz, nc, md); referenced, not owned.
const struct linop_s* sc_op; // Single channel operator.
};
static DEF_TYPEID(multc_s);
/* Multi-channel forward: coefficients (sx, sy, sz, 1, md, ., tk) ->
 * table (wx, nc, n). For each coil, the md map components are combined with
 * that coil's sensitivity map, pushed through the single-channel operator,
 * and the per-coil tables are permuted into the output. */
static void multc_apply(const linop_data_t* _data, complex float* dst, const complex float* src)
{
const struct multc_s* data = CAST_DOWN(multc_s, _data);
// Loading single channel operator.
const struct operator_s* fwd = data->sc_op->forward;
const long* sc_inp_dims = linop_domain(data->sc_op)->dims;
const long* sc_out_dims = linop_codomain(data->sc_op)->dims;
long sx = sc_inp_dims[0];
long sy = sc_inp_dims[1];
long sz = sc_inp_dims[2];
long wx = sc_out_dims[0];
long n = sc_out_dims[2];
long nc = data->nc;
long md = data->md;
long src_dims[] = { [0 ... DIMS - 1] = 1};
md_copy_dims(DIMS, src_dims, sc_inp_dims);
src_dims[MAPS_DIM] = md;
long dst_dims[] = { [0 ... DIMS - 1] = 1};
md_copy_dims(DIMS, dst_dims, sc_out_dims);
dst_dims[1] = nc;
long map_dims[] = { [0 ... DIMS - 1] = 1};
map_dims[0] = sx;
map_dims[1] = sy;
map_dims[2] = sz;
map_dims[3] = nc;
map_dims[4] = md;
long single_map_dims[] = { [0 ... DIMS - 1] = 1 };
md_copy_dims(DIMS, single_map_dims, map_dims);
single_map_dims[COIL_DIM] = 1;
complex float* single_map = md_alloc_sameplace(DIMS, single_map_dims, CFL_SIZE, src);
complex float* buffer = md_alloc_sameplace(DIMS, sc_inp_dims, CFL_SIZE, src);
// Per-coil tables collected side by side: (wx, n, nc), permuted at the end.
long tbl_dims[] = { [0 ... DIMS - 1] = 1};
tbl_dims[0] = wx;
tbl_dims[1] = n;
tbl_dims[2] = nc;
complex float* tbl = md_alloc_sameplace(DIMS, tbl_dims, CFL_SIZE, src);
md_clear(DIMS, tbl_dims, tbl, CFL_SIZE);
long pos[] = { [0 ... DIMS - 1] = 0 };
long zfmac_dims[] = { [0 ... DIMS - 1] = 1 };
md_copy_dims(DIMS, zfmac_dims, src_dims);
long strides_single_map[DIMS];
md_calc_strides(DIMS, strides_single_map, single_map_dims, CFL_SIZE);
long strides_src[DIMS];
md_calc_strides(DIMS, strides_src, src_dims, CFL_SIZE);
long strides_sc_inp[DIMS];
md_calc_strides(DIMS, strides_sc_inp, sc_inp_dims, CFL_SIZE);
for (long k = 0; k < data->nc; k++) {
md_clear(DIMS, single_map_dims, single_map, CFL_SIZE);
md_clear(DIMS, sc_inp_dims, buffer, CFL_SIZE);
// buffer = sum over MAPS_DIM of src * maps(:,:,:,k,:).
pos[COIL_DIM] = k;
md_slice(DIMS, COIL_FLAG, pos, map_dims, single_map, data->maps, CFL_SIZE);
pos[COIL_DIM] = 0;
md_zfmac2(DIMS, zfmac_dims, strides_sc_inp, buffer, strides_src, src, strides_single_map, single_map);
// Run the single-channel chain for this coil.
operator_apply(fwd, DIMS, sc_out_dims, tbl + (wx * n * k), DIMS, sc_inp_dims, buffer);
}
md_clear(DIMS, dst_dims, dst, CFL_SIZE);
// (wx, n, nc) -> (wx, nc, n).
unsigned int permute_order[DIMS] = {0, 2, 1};
for (unsigned int i = 3; i < DIMS; i++)
permute_order[i] = i;
md_permute(DIMS, permute_order, dst_dims, dst, tbl_dims, tbl, CFL_SIZE);
md_free(single_map);
md_free(buffer);
md_free(tbl);
}
/* Multi-channel adjoint: table (wx, nc, n) -> coefficients with md maps.
 * For each coil, slice out that coil's table, apply the single-channel
 * adjoint, multiply by the sensitivity map (conjugate accumulate via
 * md_zfmacc2), and sum over coils. */
static void multc_adjoint(const linop_data_t* _data, complex float* dst, const complex float* src)
{
const struct multc_s* data = CAST_DOWN(multc_s, _data);
// Loading single channel operator.
const struct operator_s* adj = data->sc_op->adjoint;
const long* sc_inp_dims = linop_codomain(data->sc_op)->dims;
const long* sc_out_dims = linop_domain(data->sc_op)->dims;
long sx = sc_out_dims[0];
long sy = sc_out_dims[1];
long sz = sc_out_dims[2];
long wx = sc_inp_dims[0];
long n = sc_inp_dims[2];
long nc = data->nc;
long md = data->md;
long src_dims[] = { [0 ... DIMS - 1] = 1};
md_copy_dims(DIMS, src_dims, sc_inp_dims);
src_dims[1] = nc;
long dst_dims[] = { [0 ... DIMS - 1] = 1};
md_copy_dims(DIMS, dst_dims, sc_out_dims);
dst_dims[MAPS_DIM] = md;
long map_dims[] = { [0 ... DIMS - 1] = 1};
map_dims[0] = sx;
map_dims[1] = sy;
map_dims[2] = sz;
map_dims[3] = nc;
map_dims[4] = md;
long single_map_dims[] = { [0 ... DIMS - 1] = 1 };
md_copy_dims(DIMS, single_map_dims, map_dims);
single_map_dims[COIL_DIM] = 1;
complex float* single_map = md_alloc_sameplace(DIMS, single_map_dims, CFL_SIZE, src);
complex float* buffer1 = md_alloc_sameplace(DIMS, sc_out_dims, CFL_SIZE, src);
complex float* buffer2 = md_alloc_sameplace(DIMS, dst_dims, CFL_SIZE, src);
// Single-coil table slice: (wx, 1, n).
long tbl_dims[] = { [0 ... DIMS - 1] = 1};
tbl_dims[0] = wx;
tbl_dims[2] = n;
complex float* tbl = md_alloc_sameplace(DIMS, tbl_dims, CFL_SIZE, src);
long pos[] = { [0 ... DIMS - 1] = 0 };
long strides_single_map[DIMS];
md_calc_strides(DIMS, strides_single_map, single_map_dims, CFL_SIZE);
long strides_sc_out[DIMS];
md_calc_strides(DIMS, strides_sc_out, sc_out_dims, CFL_SIZE);
long strides_dst[DIMS];
md_calc_strides(DIMS, strides_dst, dst_dims, CFL_SIZE);
md_clear(DIMS, dst_dims, dst, CFL_SIZE);
for (long k = 0; k < data->nc; k++) {
md_clear(DIMS, single_map_dims, single_map, CFL_SIZE);
md_clear(DIMS, sc_out_dims, buffer1, CFL_SIZE);
md_clear(DIMS, dst_dims, buffer2, CFL_SIZE);
md_clear(DIMS, tbl_dims, tbl, CFL_SIZE);
// Slice channel k (flag 2 selects dim 1) out of the table.
pos[1] = k;
md_slice(DIMS, 2, pos, src_dims, tbl, src, CFL_SIZE);
pos[1] = 0;
operator_apply(adj, DIMS, sc_out_dims, buffer1, DIMS, tbl_dims, tbl);
pos[COIL_DIM] = k;
md_slice(DIMS, COIL_FLAG, pos, map_dims, single_map, data->maps, CFL_SIZE);
pos[COIL_DIM] = 0;
// NOTE(review): buffer2 looks redundant -- md_zfmacc2 could accumulate
// into dst directly; kept as-is to preserve behavior.
md_zfmacc2(DIMS, dst_dims, strides_dst, buffer2, strides_sc_out, buffer1, strides_single_map, single_map);
md_zadd(DIMS, dst_dims, dst, dst, buffer2);
}
md_free(single_map);
md_free(buffer1);
md_free(buffer2);
md_free(tbl);
}
/* Multi-channel normal operator: for each coil, project the md map
 * components with the sensitivity map, apply the single-channel normal
 * operator, multiply back with the map (conjugate accumulate), and sum
 * the per-coil contributions. */
static void multc_normal(const linop_data_t* _data, complex float* dst, const complex float* src)
{
const struct multc_s* data = CAST_DOWN(multc_s, _data);
// Loading single channel operator.
const struct operator_s* nrm = data->sc_op->normal;
const long* sc_dims = linop_domain(data->sc_op)->dims;
long sx = sc_dims[0];
long sy = sc_dims[1];
long sz = sc_dims[2];
long nc = data->nc;
long md = data->md;
long dims[] = { [0 ... DIMS - 1] = 1};
md_copy_dims(DIMS, dims, sc_dims);
dims[MAPS_DIM] = md;
long map_dims[] = { [0 ... DIMS - 1] = 1};
map_dims[0] = sx;
map_dims[1] = sy;
map_dims[2] = sz;
map_dims[3] = nc;
map_dims[4] = md;
long single_map_dims[] = { [0 ... DIMS - 1] = 1 };
md_copy_dims(DIMS, single_map_dims, map_dims);
single_map_dims[COIL_DIM] = 1;
complex float* single_map = md_alloc_sameplace(DIMS, single_map_dims, CFL_SIZE, src);
complex float* buffer1 = md_alloc_sameplace(DIMS, sc_dims, CFL_SIZE, src);
complex float* buffer2 = md_alloc_sameplace(DIMS, sc_dims, CFL_SIZE, src);
complex float* buffer3 = md_alloc_sameplace(DIMS, dims, CFL_SIZE, src);
long pos[] = { [0 ... DIMS - 1] = 0 };
long strides_single_map[DIMS];
md_calc_strides(DIMS, strides_single_map, single_map_dims, CFL_SIZE);
long strides_sc[DIMS];
md_calc_strides(DIMS, strides_sc, sc_dims, CFL_SIZE);
long strides[DIMS];
md_calc_strides(DIMS, strides, dims, CFL_SIZE);
md_clear(DIMS, dims, dst, CFL_SIZE);
for (long k = 0; k < data->nc; k++) {
md_clear(DIMS, single_map_dims, single_map, CFL_SIZE);
md_clear(DIMS, sc_dims, buffer1, CFL_SIZE);
md_clear(DIMS, sc_dims, buffer2, CFL_SIZE);
md_clear(DIMS, dims, buffer3, CFL_SIZE);
pos[COIL_DIM] = k;
md_slice(DIMS, COIL_FLAG, pos, map_dims, single_map, data->maps, CFL_SIZE);
pos[COIL_DIM] = 0;
// buffer1 = sum over maps of src * map_k; buffer2 = A^H A buffer1 (single channel).
md_zfmac2(DIMS, dims, strides_sc, buffer1, strides, src, strides_single_map, single_map);
operator_apply(nrm, DIMS, sc_dims, buffer2, DIMS, sc_dims, buffer1);
// buffer3 = buffer2 projected back onto the md map components.
md_zfmacc2(DIMS, dims, strides, buffer3, strides_sc, buffer2, strides_single_map, single_map);
md_zadd(DIMS, dims, dst, dst, buffer3);
}
md_free(single_map);
md_free(buffer1);
md_free(buffer2);
md_free(buffer3);
}
static void multc_free(const linop_data_t* _data)
{
const struct multc_s* data = CAST_DOWN(multc_s, _data);
xfree(data);
}
/* Create the multi-channel operator by wrapping a single-channel operator:
 * the domain is the single-channel domain extended to md maps (MAPS_DIM),
 * the codomain the single-channel table extended to nc coils (dim 1).
 * maps and sc_op are referenced, not copied, and must outlive the result. */
static struct linop_s* linop_multc_create(long nc, long md, const complex float* maps, const struct linop_s* sc_op)
{
	PTR_ALLOC(struct multc_s, data);
	SET_TYPEID(multc_s, data);

	data->nc = nc;
	data->md = md;
	data->maps = maps;
	data->sc_op = sc_op;

	// FIX: the dims are only read -- no need to cast away const.
	const long* op_inp_dims = linop_domain(sc_op)->dims;
	const long* op_out_dims = linop_codomain(sc_op)->dims;

	long input_dims[] = { [0 ... DIMS - 1] = 1 };
	md_copy_dims(DIMS, input_dims, op_inp_dims);
	input_dims[MAPS_DIM] = md;

	long output_dims[] = { [0 ... DIMS - 1] = 1 };
	md_copy_dims(DIMS, output_dims, op_out_dims);
	output_dims[1] = nc;

	struct linop_s* E = linop_create(DIMS, output_dims, DIMS, input_dims, CAST_UP(PTR_PASS(data)), multc_apply, multc_adjoint, multc_normal, NULL, multc_free);

	return E;
}
/* Resize operator: pad the coefficient images along the readout
 * dimension from sx to the extended wave FOV wx. */
static const struct linop_s* linop_wavereshape_create(long wx, long sx, long sy, long sz, long nc, long tk)
{
	long out_dims[] = { [0 ... DIMS - 1] = 1 };

	out_dims[0] = wx;
	out_dims[1] = sy;
	out_dims[2] = sz;
	out_dims[3] = nc;
	out_dims[6] = tk;

	long in_dims[DIMS];
	md_copy_dims(DIMS, in_dims, out_dims);
	in_dims[0] = sx;

	return linop_resize_create(DIMS, out_dims, in_dims);
}
/* Fx operator: FFT along the readout dimension, centered or not. */
static const struct linop_s* linop_fx_create(long wx, long sy, long sz, long nc, long tk, bool centered)
{
	long dims[] = { [0 ... DIMS - 1] = 1 };

	dims[0] = wx;
	dims[1] = sy;
	dims[2] = sz;
	dims[3] = nc;
	dims[6] = tk;

	return centered ? linop_fftc_create(DIMS, dims, READ_FLAG)
			: linop_fft_create(DIMS, dims, READ_FLAG);
}
/* Wave operator: pointwise multiplication with the wave point-spread
 * function over the spatial (FFT) dimensions. */
static const struct linop_s* linop_wave_create(long wx, long sy, long sz, long nc, long tk, complex float* psf)
{
	long dims[] = { [0 ... DIMS - 1] = 1 };

	dims[0] = wx;
	dims[1] = sy;
	dims[2] = sz;
	dims[3] = nc;
	dims[6] = tk;

	return linop_cdiag_create(DIMS, dims, FFT_FLAGS, psf);
}
/* Fyz operator: FFT along both phase-encode dimensions, centered or not. */
static const struct linop_s* linop_fyz_create(long wx, long sy, long sz, long nc, long tk, bool centered)
{
	long dims[] = { [0 ... DIMS - 1] = 1 };

	dims[0] = wx;
	dims[1] = sy;
	dims[2] = sz;
	dims[3] = nc;
	dims[6] = tk;

	return centered ? linop_fftc_create(DIMS, dims, PHS1_FLAG|PHS2_FLAG)
			: linop_fft_create(DIMS, dims, PHS1_FLAG|PHS2_FLAG);
}
/* Construction sampling temporal kernel.*/
/* For every (y, z) location, build the tk x tk matrix
 *     K(y, z) = Phi^H diag(mask(y, z, :)) Phi
 * column by column: tvec1 = Phi e_t (apply basis to the t-th unit vector),
 * tvec2 = mask .* tvec1, then column t is the conjugate accumulation of Phi
 * with tvec2. The result is permuted into kern ( 1, sy, sz, 1, 1, 1, tk, tk). */
static void construct_kernel(
long mask_dims[DIMS], complex float* mask,
long phi_dims[DIMS], complex float* phi,
long kern_dims[DIMS], complex float* kern)
{
long sy = mask_dims[1];
long sz = mask_dims[2];
long tf = phi_dims[5];
long tk = phi_dims[6];
long cvec_dims[] = { [0 ... DIMS - 1] = 1 };
cvec_dims[6] = tk;
long cvec_str[DIMS];
md_calc_strides(DIMS, cvec_str, cvec_dims, CFL_SIZE);
complex float cvec[tk];
long tvec_dims[] = { [0 ... DIMS - 1] = 1 };
tvec_dims[5] = tf;
long tvec_str[DIMS];
md_calc_strides(DIMS, tvec_str, tvec_dims, CFL_SIZE);
complex float mvec[tf];
complex float tvec1[tf];
complex float tvec2[tf];
long phi_str[DIMS];
md_calc_strides(DIMS, phi_str, phi_dims, CFL_SIZE);
// Intermediate layout (tk, sy, sz, tk): one output column per (y, z, t).
long out_dims[] = { [0 ... DIMS - 1] = 1 };
out_dims[0] = tk;
out_dims[1] = sy;
out_dims[2] = sz;
out_dims[3] = tk;
complex float* out = md_calloc(DIMS, out_dims, CFL_SIZE);
for (int y = 0; y < sy; y ++) {
for (int z = 0; z < sz; z ++) {
// Sampling profile over the echo train at this (y, z).
for (int t = 0; t < tf; t ++)
mvec[t] = mask[(y + sy * z) + (sy * sz) * t];
for (int t = 0; t < tk; t ++) {
// cvec = e_t (unit vector; reset to zero at the end of the iteration).
cvec[t] = 1;
// tvec1 = Phi e_t: the t-th basis column over the echo train.
md_clear(DIMS, tvec_dims, tvec1, CFL_SIZE);
md_zfmac2(DIMS, phi_dims, tvec_str, tvec1, cvec_str, cvec, phi_str, phi);
// tvec2 = mask .* tvec1.
md_clear(DIMS, tvec_dims, tvec2, CFL_SIZE);
md_zfmac2(DIMS, tvec_dims, tvec_str, tvec2, tvec_str, tvec1, tvec_str, mvec);
// Column t of K(y, z): conjugate accumulation of Phi with tvec2.
md_clear(DIMS, cvec_dims, out + y * tk + z * sy * tk + t * sy * sz * tk, CFL_SIZE);
md_zfmacc2(DIMS, phi_dims, cvec_str, out + y * tk + z * sy * tk + t * sy * sz * tk,
tvec_str, tvec2, phi_str, phi);
cvec[t] = 0;
}
}
}
// (tk, sy, sz, tk) -> ( 1, sy, sz, 1, 1, 1, tk, tk).
unsigned int permute_order[DIMS] = {4, 1, 2, 5, 6, 7, 3, 0};
for (unsigned int i = 8; i < DIMS; i++)
permute_order[i] = i;
md_permute(DIMS, permute_order, kern_dims, kern, out_dims, out, CFL_SIZE);
md_free(out);
}
/* Apply fftmod phases to the data table and to the maps so that the
 * centered FFTs can be replaced by uncentered ones in the operator chain.
 * The table is modulated along the readout via fftmod; since each table
 * line sits at an explicit (ky, kz) position from the reorder table, the
 * y/z modulation is applied as one scalar phase per readout line. */
static void fftmod_apply(long sy, long sz,
	long reorder_dims[DIMS], complex float* reorder,
	long table_dims[DIMS], complex float* table,
	long maps_dims[DIMS], complex float* maps)
{
	long wx = table_dims[0];
	long nc = table_dims[1];

	fftmod(DIMS, table_dims, READ_FLAG, table, table);
	fftmod(DIMS, maps_dims, FFT_FLAGS, maps, maps);

	// Per-sample phase increment along y and z.
	double dy = ((double) sy / 2) / ((double) sy);
	double dz = ((double) sz / 2) / ((double) sz);

	// FIX: was { [0 ... DIMS] = 1 }, which declares DIMS + 1 elements;
	// every other dims array in this file uses DIMS elements.
	long dims[] = { [0 ... DIMS - 1] = 1 };
	dims[0] = wx;
	dims[1] = nc;

	long n = reorder_dims[0];

	for (long k = 0; k < n; k++) {

		long y = lround(creal(reorder[k]));
		long z = lround(creal(reorder[k + n]));

		complex float py = cexp(2.i * M_PI * dy * y);
		complex float pz = cexp(2.i * M_PI * dz * z);

		md_zsmul(DIMS, dims, table + k * wx * nc, table + k * wx * nc, py * pz);
	}
}
enum algo_t { CG, IST, FISTA };
int main_wshfl(int argc, char* argv[])
{
double start_time = timestamp();
float lambda = 1E-5;
int maxiter = 300;
int blksize = 8;
float step = 0.5;
float tol = 1.E-3;
bool llr = false;
bool wav = false;
bool fista = false;
bool hgwld = false;
float cont = 1;
float eval = -1;
const char* fwd = NULL;
const char* x0 = NULL;
int gpun = -1;
bool dcx = false;
bool pf = false;
const struct opt_s opts[] = {
OPT_FLOAT( 'r', &lambda, "lambda", "Soft threshold lambda for wavelet or locally low rank."),
OPT_INT( 'b', &blksize, "blkdim", "Block size for locally low rank."),
OPT_INT( 'i', &maxiter, "mxiter", "Maximum number of iterations."),
OPT_FLOAT( 's', &step, "stepsz", "Step size for iterative method."),
OPT_FLOAT( 'c', &cont, "cntnu", "Continuation value for IST/FISTA."),
OPT_FLOAT( 't', &tol, "toler", "Tolerance convergence condition for iterative method."),
OPT_FLOAT( 'e', &eval, "eigvl", "Maximum eigenvalue of normal operator, if known."),
OPT_STRING('F', &fwd, "frwrd", "Go from shfl-coeffs to data-table. Pass in coeffs path."),
OPT_STRING('O', &x0, "initl", "Initialize reconstruction with guess."),
OPT_INT( 'g', &gpun, "gpunm", "GPU device number."),
OPT_SET( 'f', &fista, "Reconstruct using FISTA instead of IST."),
OPT_SET( 'H', &hgwld, "Use hogwild in IST/FISTA."),
OPT_SET( 'v', &dcx, "Split coefficients to real and imaginary components."),
OPT_SET( 'w', &wav, "Use wavelet."),
OPT_SET( 'l', &llr, "Use locally low rank across temporal coefficients."),
OPT_SET( 'p', &pf, "Use locally low rank and real-imaginary components for partial fourier."),
};
cmdline(&argc, argv, 6, 6, usage_str, help_str, ARRAY_SIZE(opts), opts);
if (pf)
dcx = true;
debug_printf(DP_INFO, "Loading data... ");
long maps_dims[DIMS];
complex float* maps = load_cfl(argv[1], DIMS, maps_dims);
long wave_dims[DIMS];
complex float* wave = load_cfl(argv[2], DIMS, wave_dims);
long phi_dims[DIMS];
complex float* phi = load_cfl(argv[3], DIMS, phi_dims);
long reorder_dims[DIMS];
complex float* reorder = load_cfl(argv[4], DIMS, reorder_dims);
long table_dims[DIMS];
complex float* table = load_cfl(argv[5], DIMS, table_dims);
debug_printf(DP_INFO, "Done.\n");
if (gpun >= 0)
num_init_gpu_device(gpun);
else
num_init();
int wx = wave_dims[0];
int sx = maps_dims[0];
int sy = maps_dims[1];
int sz = maps_dims[2];
int nc = maps_dims[3];
int md = maps_dims[4];
int tf = phi_dims[5];
int tk = phi_dims[6];
debug_printf(DP_INFO, "Constructing sampling mask from reorder table... ");
long mask_dims[] = { [0 ... DIMS - 1] = 1 };
mask_dims[1] = sy;
mask_dims[2] = sz;
mask_dims[5] = tf;
complex float* mask = md_calloc(DIMS, mask_dims, CFL_SIZE);
construct_mask(reorder_dims, reorder, mask_dims, mask);
debug_printf(DP_INFO, "Done.\n");
debug_printf(DP_INFO, "Constructing sampling-temporal kernel... ");
long kernel_dims[] = { [0 ... DIMS - 1] = 1 };
kernel_dims[1] = sy;
kernel_dims[2] = sz;
kernel_dims[6] = tk;
kernel_dims[7] = tk;
complex float* kernel = md_calloc(DIMS, kernel_dims, CFL_SIZE);
construct_kernel(mask_dims, mask, phi_dims, phi, kernel_dims, kernel);
md_free(mask);
debug_printf(DP_INFO, "Done.\n");
long coeff_dims[] = { [0 ... DIMS - 1] = 1 };
coeff_dims[0] = sx;
coeff_dims[1] = sy;
coeff_dims[2] = sz;
coeff_dims[4] = md;
coeff_dims[6] = tk;
coeff_dims[8] = dcx ? 2 : 1;
debug_printf(DP_INFO, "Creating single channel linear operators:\n");
double t1;
double t2;
t1 = timestamp();
const struct linop_s* R = linop_wavereshape_create(wx, sx, sy, sz, 1, tk);
t2 = timestamp();
debug_printf(DP_INFO, "\tR: %f seconds.\n", t2 - t1);
t1 = timestamp();
const struct linop_s* Fx = linop_fx_create(wx, sy, sz, 1, tk, false);
t2 = timestamp();
debug_printf(DP_INFO, "\tFx: %f seconds.\n", t2 - t1);
t1 = timestamp();
const struct linop_s* W = linop_wave_create(wx, sy, sz, 1, tk, wave);
t2 = timestamp();
debug_printf(DP_INFO, "\tW: %f seconds.\n", t2 - t1);
t1 = timestamp();
const struct linop_s* Fyz = linop_fyz_create(wx, sy, sz, 1, tk, false);
t2 = timestamp();
debug_printf(DP_INFO, "\tFyz: %f seconds.\n", t2 - t1);
t1 = timestamp();
long single_channel_table_dims[] = { [0 ... DIMS - 1] = 1 };
md_copy_dims(DIMS, single_channel_table_dims, table_dims);
single_channel_table_dims[1] = 1;
const struct linop_s* K = linop_kern_create(gpun >= 0, reorder_dims, reorder, phi_dims, phi, kernel_dims, kernel, single_channel_table_dims);
t2 = timestamp();
debug_printf(DP_INFO, "\tK: %f seconds.\n", t2 - t1);
struct linop_s* A_sc = linop_chain_FF(linop_chain_FF(linop_chain_FF(linop_chain_FF(
R, Fx), W), Fyz), K);
debug_printf(DP_INFO, "Single channel forward operator information:\n");
print_opdims(A_sc);
if (eval < 0)
#ifdef USE_CUDA
eval = (gpun >= 0) ? estimate_maxeigenval_gpu(A_sc->normal) : estimate_maxeigenval(A_sc->normal);
#else
eval = estimate_maxeigenval(A_sc->normal);
#endif
debug_printf(DP_INFO, "\tMax eval: %.2e\n", eval);
step /= eval;
struct linop_s* A = linop_multc_create(nc, md, maps, A_sc);
debug_printf(DP_INFO, "Overall forward linear operator information:\n");
print_opdims(A);
if (fwd != NULL) {
debug_printf(DP_INFO, "Going from coefficients to data table... ");
complex float* coeffs_to_fwd = load_cfl(fwd, DIMS, coeff_dims);
complex float* table_forward = create_cfl(argv[6], DIMS, table_dims);
const struct linop_s* R = linop_wavereshape_create(wx, sx, sy, sz, 1, tk);
const struct linop_s* CFx = linop_fx_create( wx, sy, sz, 1, tk, true);
const struct linop_s* W = linop_wave_create(wx, sy, sz, 1, tk, wave);
const struct linop_s* CFyz = linop_fyz_create(wx, sy, sz, 1, tk, true);
const struct linop_s* K = linop_kern_create(gpun >= 0, reorder_dims, reorder, phi_dims, phi, kernel_dims, kernel, single_channel_table_dims);
struct linop_s* AC_sc = linop_chain_FF(linop_chain_FF(linop_chain_FF(linop_chain_FF(
R, CFx), W), CFyz), K);
struct linop_s* AC = linop_multc_create(nc, md, maps, AC_sc);
operator_apply(AC->forward, DIMS, table_dims, table_forward, DIMS, coeff_dims, coeffs_to_fwd);
debug_printf(DP_INFO, "Done.\n");
debug_printf(DP_INFO, "Cleaning up... ");
linop_free(AC);
linop_free(AC_sc);
md_free(kernel);
unmap_cfl(DIMS, maps_dims, maps);
unmap_cfl(DIMS, wave_dims, wave);
unmap_cfl(DIMS, phi_dims, phi);
unmap_cfl(DIMS, reorder_dims, reorder);
unmap_cfl(DIMS, table_dims, table);
unmap_cfl(DIMS, table_dims, table_forward);
debug_printf(DP_INFO, "Done.\n");
return 0;
}
if (dcx) {
debug_printf(DP_INFO, "\tSplitting result into real and imaginary components.\n");
struct linop_s* tmp = A;
struct linop_s* dcxop = linop_decompose_complex_create(DIMS, ITER_DIM, linop_domain(A)->dims);
A = linop_chain(dcxop, tmp);
linop_free(dcxop);
linop_free(tmp);
}
debug_printf(DP_INFO, "Normalizing data table and applying fftmod to table and maps... ");
float norm = md_znorm(DIMS, table_dims, table);
md_zsmul(DIMS, table_dims, table, table, 1. / norm);
fftmod_apply(sy, sz, reorder_dims, reorder, table_dims, table, maps_dims, maps);
debug_printf(DP_INFO, "Done.\n");
const struct operator_p_s* T = NULL;
long blkdims[MAX_LEV][DIMS];
long minsize[] = { [0 ... DIMS - 1] = 1 };
minsize[0] = MIN(sx, 16);
minsize[1] = MIN(sy, 16);
minsize[2] = MIN(sz, 16);
unsigned int WAVFLAG = (sx > 1) * READ_FLAG | (sy > 1) * PHS1_FLAG | (sz > 2) * PHS2_FLAG;
enum algo_t algo = CG;
if ((wav) || (llr) || (pf)) {
algo = (fista) ? FISTA : IST;
if (wav) {
debug_printf(DP_INFO, "Creating wavelet threshold operator... ");
T = prox_wavelet_thresh_create(DIMS, coeff_dims, WAVFLAG, 0u, minsize, lambda, true);
} else if (llr) {
debug_printf(DP_INFO, "Creating locally low rank threshold operator across coeff and real-imag... ");
llr_blkdims(blkdims, ~(COEFF_FLAG | ITER_FLAG), coeff_dims, blksize);
T = lrthresh_create(coeff_dims, true, ~(COEFF_FLAG | ITER_FLAG), (const long (*)[])blkdims, lambda, false, false, false);
} else {
assert(dcx);
debug_printf(DP_INFO, "Creating locally low rank threshold operator across real-imag... ");
llr_blkdims(blkdims, ~ITER_FLAG, coeff_dims, blksize);
T = lrthresh_create(coeff_dims, true, ~ITER_FLAG, (const long (*)[])blkdims, lambda, false, false, false);
}
debug_printf(DP_INFO, "Done.\n");
}
italgo_fun2_t italgo = iter2_call_iter;
struct iter_call_s iter2_data;
SET_TYPEID(iter_call_s, &iter2_data);
iter_conf* iconf = CAST_UP(&iter2_data);
struct iter_conjgrad_conf cgconf = iter_conjgrad_defaults;
struct iter_fista_conf fsconf = iter_fista_defaults;
struct iter_ist_conf isconf = iter_ist_defaults;
switch(algo) {
case IST:
debug_printf(DP_INFO, "Using IST.\n");
debug_printf(DP_INFO, "\tLambda: %0.2e\n", lambda);
debug_printf(DP_INFO, "\tMaximum iterations: %d\n", maxiter);
debug_printf(DP_INFO, "\tStep size: %0.2e\n", step);
debug_printf(DP_INFO, "\tHogwild: %d\n", (int) hgwld);
debug_printf(DP_INFO, "\tTolerance: %0.2e\n", tol);
debug_printf(DP_INFO, "\tContinuation: %0.2e\n", cont);
isconf = iter_ist_defaults;
isconf.step = step;
isconf.maxiter = maxiter;
isconf.tol = tol;
isconf.continuation = cont;
isconf.hogwild = hgwld;
iter2_data.fun = iter_ist;
iter2_data._conf = CAST_UP(&isconf);
break;
case FISTA:
debug_printf(DP_INFO, "Using FISTA.\n");
debug_printf(DP_INFO, "\tLambda: %0.2e\n", lambda);
debug_printf(DP_INFO, "\tMaximum iterations: %d\n", maxiter);
debug_printf(DP_INFO, "\tStep size: %0.2e\n", step);
debug_printf(DP_INFO, "\tHogwild: %d\n", (int) hgwld);
debug_printf(DP_INFO, "\tTolerance: %0.2e\n", tol);
debug_printf(DP_INFO, "\tContinuation: %0.2e\n", cont);
fsconf = iter_fista_defaults;
fsconf.maxiter = maxiter;
fsconf.step = step;
fsconf.hogwild = hgwld;
fsconf.tol = tol;
fsconf.continuation = cont;
iter2_data.fun = iter_fista;
iter2_data._conf = CAST_UP(&fsconf);
break;
default:
case CG:
debug_printf(DP_INFO, "Using CG.\n");
debug_printf(DP_INFO, "\tMaximum iterations: %d\n", maxiter);
debug_printf(DP_INFO, "\tTolerance: %0.2e\n", tol);
cgconf = iter_conjgrad_defaults;
cgconf.maxiter = maxiter;
cgconf.l2lambda = 0;
cgconf.tol = tol;
iter2_data.fun = iter_conjgrad;
iter2_data._conf = CAST_UP(&cgconf);
break;
}
complex float* init = NULL;
if (x0 != NULL) {
debug_printf(DP_INFO, "Loading in initial guess... ");
init = load_cfl(x0, DIMS, coeff_dims);
debug_printf(DP_INFO, "Done.\n");
}
debug_printf(DP_INFO, "Reconstruction... ");
complex float* recon = create_cfl(argv[6], DIMS, coeff_dims);
struct lsqr_conf lsqr_conf = { 0., gpun >= 0 };
double recon_start = timestamp();
const struct operator_p_s* J = lsqr2_create(&lsqr_conf, italgo, iconf, (const float*) init, A, NULL, 1, &T, NULL, NULL);
operator_p_apply(J, 1., DIMS, coeff_dims, recon, DIMS, table_dims, table);
double recon_end = timestamp();
debug_printf(DP_INFO, "Done.\nReconstruction time: %f seconds.\n", recon_end - recon_start);
debug_printf(DP_INFO, "Cleaning up and saving result... ");
operator_p_free(J);
linop_free(A);
linop_free(A_sc);
md_free(kernel);
unmap_cfl(DIMS, maps_dims, maps);
unmap_cfl(DIMS, wave_dims, wave);
unmap_cfl(DIMS, phi_dims, phi);
unmap_cfl(DIMS, reorder_dims, reorder);
unmap_cfl(DIMS, table_dims, table);
unmap_cfl(DIMS, coeff_dims, recon);
if (x0 != NULL)
unmap_cfl(DIMS, coeff_dims, init);
debug_printf(DP_INFO, "Done.\n");
double end_time = timestamp();
debug_printf(DP_INFO, "Total time: %f seconds.\n", end_time - start_time);
return 0;
}
|
mhpTest2.c | int foo();
int bar();
// Minimal OpenMP parallel-region fixture (file name suggests a
// may-happen-in-parallel analysis test input, not production code).
int main() {
#pragma omp parallel
{
// `x` is private to each thread of the parallel region (block-scope local).
int x;
// NOTE(review): reads `x` before any write — uninitialized use. Presumably
// intentional as analysis-tool input; confirm before "fixing".
x = x + 1;
// foo()/bar() are only declared above; definitions live elsewhere.
x = foo() + bar();
}
}
|
csrgemv_task.h | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
#pragma once
#include <cstring>
#include <thread>
#include "bof_types.h"
#include "bof_utils.h"
#include "tasks/task.h"
namespace flash {
// One-shot task computing out[0..a_nrows) = A_blk * in, where A_blk is the
// CSR row block [start_row, start_row + a_nrows) of a larger matrix.
// Row offsets (`ia`) are already resident in memory; column indices (`ja`)
// and values (`a`) live behind flash_ptr and are registered for staging via
// add_read(), then resolved from in_mem_ptrs inside execute().
class CsrGemvNoTransInMem : public BaseTask {
  // matrix specs
  MKL_INT* ia;            // block-local row offsets; new[]'d here, delete[]'d in execute()
  flash_ptr<MKL_INT> ja;  // column indices of this block (on flash)
  flash_ptr<FPTYPE> a;    // nonzero values of this block (on flash)
  FBLAS_UINT dim;         // padded dimension handed to MKL = max(a_nrows, a_cols)
  FBLAS_UINT a_nrows;     // actual number of rows in this block
  FBLAS_UINT nnzs;        // nonzeros in this block
  // vector specs
  FPTYPE* in;   // full input vector (not owned)
  FPTYPE* out;  // output slice starting at start_row (not owned)

 public:
  // start_row    first row of this block in the full matrix
  // a_rows/a_cols dimensions of the full matrix
  // a_rblk_size  nominal row-block size; the last block may be smaller
  // ia           row-offset array of the FULL matrix (indexed from row 0)
  CsrGemvNoTransInMem(FBLAS_UINT start_row, FBLAS_UINT a_rows,
                      FBLAS_UINT a_cols, FBLAS_UINT a_rblk_size, MKL_INT* ia,
                      flash_ptr<MKL_INT> ja, flash_ptr<FPTYPE> a,
                      FPTYPE* v_in, FPTYPE* v_out) {
    // matrix specs
    this->a_nrows = std::min(a_rows - start_row, a_rblk_size);
    this->dim = std::max(a_nrows, a_cols);
    // advance ja/a to the first nonzero of the block (parameter `ia` here,
    // not the member being built below)
    this->ja = ja + ia[start_row];
    this->a = a + ia[start_row];
    // copy over offsets and remove start offset
    this->ia = new MKL_INT[this->dim + 1];
    memcpy(this->ia, ia + start_row, (this->a_nrows + 1) * sizeof(MKL_INT));
    for (FBLAS_UINT i = 1; i <= this->a_nrows; i++) {
      this->ia[i] -= this->ia[0];
    }
    this->ia[0] = 0;
    // pad trailing rows as empty so the block looks like a dim x dim matrix
    // to mkl_csrgemv
    for (FBLAS_UINT i = this->a_nrows + 1; i <= this->dim; i++) {
      this->ia[i] = this->ia[this->a_nrows];
    }
    this->in = v_in;
    this->out = v_out + start_row;
    // add reads
    this->nnzs = this->ia[this->dim] - this->ia[0];
    StrideInfo sinfo;
    sinfo.stride = 0;
    sinfo.n_strides = 1;
    sinfo.len_per_stride = this->nnzs * sizeof(FPTYPE);
    this->add_read(this->a, sinfo);
    sinfo.len_per_stride = this->nnzs * sizeof(MKL_INT);
    this->add_read(this->ja, sinfo);
  }

  // Runs once after the staged reads complete; frees `ia`, so the task must
  // not execute twice. NOTE(review): if execute() never runs, `ia` leaks
  // (no destructor releases it) — confirm the scheduler always runs tasks.
  void execute() {
    MKL_INT* ja_ptr = (MKL_INT*) this->in_mem_ptrs[this->ja];
    FPTYPE* a_ptr = (FPTYPE*) this->in_mem_ptrs[this->a];
    FPTYPE* v_out = nullptr;
    // When padded (dim > a_nrows) MKL writes dim entries, so use a scratch
    // buffer and copy back only the real rows afterwards.
    if (this->dim > this->a_nrows) {
      v_out = new FPTYPE[this->dim];
    } else {
      v_out = this->out;
    }
    // MKL parameters;
    char transa = 'N';
    MKL_INT m = this->dim;
    // execute MKL call
    mkl_csrgemv(&transa, &m, a_ptr, this->ia, ja_ptr, this->in, v_out);
    if (this->dim > this->a_nrows) {
      memcpy(this->out, v_out, this->a_nrows * sizeof(FPTYPE));
      delete[] v_out;
    }
    // free memory for ia
    delete[] this->ia;
  }

  // Estimated in-memory footprint in bytes (nonzero values + column indices
  // + the vector storage execute() touches); presumably used by the
  // scheduler for memory budgeting — confirm against the BaseTask contract.
  FBLAS_UINT size() {
    if (this->dim > this->a_nrows) {
      return (this->nnzs * (sizeof(MKL_INT) + sizeof(FPTYPE)) +
              (this->dim + this->a_nrows) * sizeof(FPTYPE));
    } else {
      return (this->nnzs * (sizeof(MKL_INT) + sizeof(FPTYPE)) +
              (this->a_nrows * sizeof(FPTYPE)));
    }
  }
};
// One-shot task accumulating out += A_blk^T * in_blk for the CSR row block
// [start_row, start_row + blk_size). The transpose product scatters into the
// whole output vector, so concurrent tasks serialize the final accumulation
// with a caller-supplied mutex.
class CsrGemvTransInMem : public BaseTask {
  // matrix specs
  MKL_INT* ia;            // block-local row offsets; new[]'d here, delete[]'d in execute()
  flash_ptr<MKL_INT> ja;  // column indices of this block (on flash)
  flash_ptr<FPTYPE> a;    // nonzero values of this block (on flash)
  FBLAS_UINT blk_size;    // actual number of rows in this block
  FBLAS_UINT a_rows;
  FBLAS_UINT a_cols;
  FBLAS_UINT dim;         // padded dimension handed to MKL = max(a_rows, a_cols)
  FBLAS_UINT nnzs;        // nonzeros in this block
  // `atomic` access to output array
  std::mutex& mut;
  // vector specs
  FPTYPE* in;   // input slice starting at start_row (not owned)
  FPTYPE* out;  // full output vector, shared across tasks (not owned)

 public:
  // See CsrGemvNoTransInMem for the meaning of the matrix parameters;
  // sync_mut guards the shared output vector `v_out`.
  CsrGemvTransInMem(FBLAS_UINT start_row, FBLAS_UINT a_rows,
                    FBLAS_UINT a_cols, FBLAS_UINT a_rblk_size, MKL_INT* ia,
                    flash_ptr<MKL_INT> ja, flash_ptr<FPTYPE> a, FPTYPE* v_in,
                    FPTYPE* v_out, std::mutex& sync_mut)
      : mut(std::ref(sync_mut)) {
    // matrix specs
    this->blk_size = std::min(a_rows - start_row, a_rblk_size);
    this->dim = std::max(a_rows, a_cols);
    // advance ja/a to the first nonzero of the block (parameter `ia`)
    this->ja = ja + ia[start_row];
    this->a = a + ia[start_row];
    // copy over offsets and remove start offset
    this->ia = new MKL_INT[this->dim + 1];
    memcpy(this->ia, ia + start_row, (this->blk_size + 1) * sizeof(MKL_INT));
    for (FBLAS_UINT i = 1; i <= this->blk_size; i++) {
      this->ia[i] -= this->ia[0];
    }
    this->ia[0] = 0;
    // pad trailing rows as empty so the block looks dim x dim to MKL
    for (FBLAS_UINT i = this->blk_size + 1; i <= this->dim; i++) {
      this->ia[i] = this->ia[this->blk_size];
    }
    this->in = v_in + start_row;
    this->out = v_out;
    this->a_rows = a_rows;
    this->a_cols = a_cols;
    // add reads
    this->nnzs = this->ia[this->dim] - this->ia[0];
    StrideInfo sinfo;
    sinfo.stride = 0;
    sinfo.n_strides = 1;
    sinfo.len_per_stride = this->nnzs * sizeof(FPTYPE);
    this->add_read(this->a, sinfo);
    sinfo.len_per_stride = this->nnzs * sizeof(MKL_INT);
    this->add_read(this->ja, sinfo);
  }

  // Computes the padded-size transpose product into a private buffer, then
  // locks `mut` and adds the first a_cols entries into the shared output.
  // Frees `ia`, so the task must not execute twice; NOTE(review): `ia`
  // leaks if execute() never runs (no destructor).
  void execute() {
    MKL_INT* ja_ptr = (MKL_INT*) this->in_mem_ptrs[this->ja];
    FPTYPE* a_ptr = (FPTYPE*) this->in_mem_ptrs[this->a];
    // prepare MKL parameters;
    char transa = 'T';
    MKL_INT m = (MKL_INT) this->dim;
    // private, zero-initialized output and zero-padded copy of the input
    // slice, both of the padded length `dim`
    FPTYPE* v_out = new FPTYPE[this->dim];
    memset(v_out, 0, this->dim * sizeof(FPTYPE));
    FPTYPE* v_in = new FPTYPE[this->dim];
    memset(v_in, 0, this->dim * sizeof(FPTYPE));
    memcpy(v_in, this->in, this->blk_size * sizeof(FPTYPE));
    // execute MKL call
    mkl_csrgemv(&transa, &m, a_ptr, this->ia, ja_ptr, v_in, v_out);
    delete[] this->ia;
    delete[] v_in;
    // lock and add to existing result
    {
      std::unique_lock<std::mutex> lk(this->mut);
// accumulation itself is parallelized; safe because the mutex excludes
// other *tasks*, and each omp iteration touches a distinct element
#pragma omp parallel for
      for (FBLAS_UINT i = 0; i < this->a_cols; i++) {
        this->out[i] += v_out[i];
      }
    }
    delete[] v_out;
  }

  // Estimated in-memory footprint in bytes (nonzeros + indices + temporary
  // vectors); presumably used for scheduler memory budgeting — confirm.
  FBLAS_UINT size() {
    return (this->nnzs * (sizeof(MKL_INT) + sizeof(FPTYPE))) +
           (this->dim * (sizeof(FPTYPE) + sizeof(MKL_INT))) +
           (this->dim > this->blk_size ? this->dim * sizeof(FPTYPE) : 0);
  }
};
} // namespace flash
|
interpolate_v2_op.h | /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <algorithm>
#include <string>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/phi/core/hostdevice.h"
#include "paddle/phi/kernels/funcs/math_function.h"
namespace paddle {
namespace operators {
template <typename T, size_t D, int MajorType = Eigen::RowMajor,
typename IndexType = Eigen::DenseIndex>
using EigenTensor = framework::EigenTensor<T, D, MajorType, IndexType>;
using Tensor = framework::Tensor;
using DataLayout = framework::DataLayout;
// Read a list of 1-element shape tensors into a plain std::vector<int>.
// Each tensor must have shape [1]; GPU-resident tensors are synchronously
// staged to the CPU before their single int32 value is read.
inline std::vector<int> get_new_shape(
    const std::vector<const Tensor*>& list_new_shape_tensor) {
  std::vector<int> vec_new_shape;
  vec_new_shape.reserve(list_new_shape_tensor.size());
  for (const Tensor* tensor : list_new_shape_tensor) {
    PADDLE_ENFORCE_EQ(tensor->dims(), phi::make_ddim({1}),
                      platform::errors::InvalidArgument(
                          "The shape of dimension tensor should be [1],"
                          "but received d%.",
                          tensor->dims()));
    int value;
    if (platform::is_gpu_place(tensor->place())) {
      framework::Tensor cpu_copy;
      paddle::framework::TensorCopySync(*tensor, platform::CPUPlace(),
                                        &cpu_copy);
      value = static_cast<int32_t>(*cpu_copy.data<int32_t>());
    } else {
      value = static_cast<int32_t>(*tensor->data<int32_t>());
    }
    vec_new_shape.push_back(value);
  }
  return vec_new_shape;
}
// Copy a (possibly device-resident) tensor's contents into a host-side
// std::vector<T>. GPU / NPU / XPU tensors are synchronously staged to CPU
// memory first; the staging buffer must outlive the final copy below.
template <typename T>
inline std::vector<T> get_new_data_from_tensor(const Tensor* new_data_tensor) {
  framework::Tensor cpu_starts_tensor;
  const T* src = new_data_tensor->data<T>();
  if (platform::is_gpu_place(new_data_tensor->place())) {
    paddle::framework::TensorCopySync(*new_data_tensor, platform::CPUPlace(),
                                      &cpu_starts_tensor);
    src = cpu_starts_tensor.data<T>();
  }
#ifdef PADDLE_WITH_ASCEND_CL
  if (platform::is_npu_place(new_data_tensor->place())) {
    paddle::framework::TensorCopySync(*new_data_tensor, platform::CPUPlace(),
                                      &cpu_starts_tensor);
    src = cpu_starts_tensor.data<T>();
  }
#endif
#ifdef PADDLE_WITH_XPU
  if (platform::is_xpu_place(new_data_tensor->place())) {
    paddle::framework::TensorCopySync(*new_data_tensor, platform::CPUPlace(),
                                      &cpu_starts_tensor);
    src = cpu_starts_tensor.data<T>();
  }
#endif
  return std::vector<T>(src, src + new_data_tensor->numel());
}
// Decompose a 3-D/4-D/5-D tensor shape into (N, C, D, H, W) according to the
// data layout. Missing spatial dimensions (D, and H for the 3-D case) are
// reported as 1. Anything that is not 3-D or 4-D is treated as 5-D.
inline void ExtractNCDWH(const framework::DDim& dims,
                         const DataLayout& data_layout, int* N, int* C, int* D,
                         int* H, int* W) {
  const bool channel_first = (data_layout == DataLayout::kNCHW);
  *N = dims[0];
  switch (dims.size()) {
    case 3:  // (N, C, W) or (N, W, C)
      *C = channel_first ? dims[1] : dims[2];
      *D = 1;
      *H = 1;
      *W = channel_first ? dims[2] : dims[1];
      break;
    case 4:  // (N, C, H, W) or (N, H, W, C)
      *C = channel_first ? dims[1] : dims[3];
      *D = 1;
      *H = channel_first ? dims[2] : dims[1];
      *W = channel_first ? dims[3] : dims[2];
      break;
    default:  // (N, C, D, H, W) or (N, D, H, W, C)
      *C = channel_first ? dims[1] : dims[4];
      *D = channel_first ? dims[2] : dims[1];
      *H = channel_first ? dims[3] : dims[2];
      *W = channel_first ? dims[4] : dims[3];
      break;
  }
}
// 2-D nearest-neighbor interpolation for a 4-D (NCHW or NHWC) tensor.
// The source index is rounded to nearest when align_corners is set and
// floored otherwise.
template <typename T>
static void NearestNeighborInterpolate(const Tensor& input, Tensor* output,
                                       const float ratio_h, const float ratio_w,
                                       const int n, const int c,
                                       const int out_h, const int out_w,
                                       const bool align_corners,
                                       const DataLayout& data_layout) {
  auto in_t = EigenTensor<T, 4>::From(input);
  auto out_t = EigenTensor<T, 4>::From(*output);
  for (int oh = 0; oh < out_h; oh++) {  // output rows
    const int ih = align_corners ? static_cast<int>(ratio_h * oh + 0.5)
                                 : static_cast<int>(ratio_h * oh);
    for (int ow = 0; ow < out_w; ow++) {  // output columns
      const int iw = align_corners ? static_cast<int>(ratio_w * ow + 0.5)
                                   : static_cast<int>(ratio_w * ow);
      for (int b = 0; b < n; b++) {      // batches
        for (int ch = 0; ch < c; ch++) {  // channels
          if (data_layout == DataLayout::kNCHW) {
            out_t(b, ch, oh, ow) = in_t(b, ch, ih, iw);
          } else {
            out_t(b, oh, ow, ch) = in_t(b, ih, iw, ch);
          }
        }
      }
    }
  }
}
// 3-D nearest-neighbor interpolation for a 5-D (NCDHW or NDHWC) tensor.
// Source indices are rounded to nearest when align_corners is set, floored
// otherwise.
template <typename T>
static void NearestNeighbor3DInterpolate(
    const Tensor& input, Tensor* output, const float ratio_d,
    const float ratio_h, const float ratio_w, const int n, const int c,
    const int out_d, const int out_h, const int out_w, const bool align_corners,
    const DataLayout& data_layout) {
  auto in_t = EigenTensor<T, 5>::From(input);
  auto out_t = EigenTensor<T, 5>::From(*output);
  for (int od = 0; od < out_d; od++) {  // depth slices
    const int id = align_corners ? static_cast<int>(ratio_d * od + 0.5)
                                 : static_cast<int>(ratio_d * od);
    for (int oh = 0; oh < out_h; oh++) {  // rows
      const int ih = align_corners ? static_cast<int>(ratio_h * oh + 0.5)
                                   : static_cast<int>(ratio_h * oh);
      for (int ow = 0; ow < out_w; ow++) {  // columns
        const int iw = align_corners ? static_cast<int>(ratio_w * ow + 0.5)
                                     : static_cast<int>(ratio_w * ow);
        for (int b = 0; b < n; b++) {      // batches
          for (int ch = 0; ch < c; ch++) {  // channels
            if (data_layout == DataLayout::kNCHW) {
              out_t(b, ch, od, oh, ow) = in_t(b, ch, id, ih, iw);
            } else {  // NDHWC
              out_t(b, od, oh, ow, ch) = in_t(b, id, ih, iw, ch);
            }
          }
        }
      }
    }
  }
}
// 1-D linear interpolation along W for a 3-D (NCW or NWC) tensor. For every
// output column the two flanking input columns (west/east) and their blend
// weights are precomputed, then the (optionally OpenMP-collapsed) main loop
// blends them per batch and channel.
template <typename T>
static void LinearInterpolation(const Tensor& input, Tensor* output,
                                const float ratio_w, const int in_w,
                                const int n, const int c, const int out_w,
                                const bool align_corners, const bool align_mode,
                                const DataLayout data_layout) {
  auto input_t = EigenTensor<T, 3>::From(input);
  auto output_t = EigenTensor<T, 3>::From(*output);
  // half-pixel source mapping is used when align_mode==0 and corners are
  // not aligned
  bool align_flag = (align_mode == 0 && !align_corners);
  std::vector<int> vx_w, vx_e;
  std::vector<float> vd_w, vd_e;
  // BUGFIX: the loop below writes through operator[]. reserve() only raises
  // capacity and leaves size() == 0, so those stores were undefined
  // behavior; resize() creates real elements.
  vx_w.resize(out_w);
  vx_e.resize(out_w);
  vd_w.resize(out_w);
  vd_e.resize(out_w);
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int l = 0; l < out_w; l++) {
    int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5)
                         : static_cast<int>(ratio_w * l);
    x_w = (x_w > 0) ? x_w : 0;  // w: west neighbor, clamped to 0
    int x_e = (x_w < (in_w - 1)) ? (x_w + 1) : x_w;  // w_id: east neighbor
    float idx_src_x = ratio_w * (l + 0.5) - 0.5;
    idx_src_x = (idx_src_x > 0) ? idx_src_x : 0;
    float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w;  // w1lambda
    float d_e = 1.f - d_w;                                         // w2lambda
    {
      vx_w[l] = x_w;
      vx_e[l] = x_e;
      vd_w[l] = d_w;
      vd_e[l] = d_e;
    }
  }
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for collapse(3)
#endif
  for (int i = 0; i < n; i++) {    // loop for batches
    for (int j = 0; j < c; j++) {  // loop for channels
      for (int l = 0; l < out_w; l++) {
        // linear interpolation: blend west/east neighbors with the
        // complementary weights
        T out_t;
        if (data_layout == DataLayout::kNCHW) {
          out_t = input_t(i, j, vx_w[l]) * vd_e[l] +
                  input_t(i, j, vx_e[l]) * vd_w[l];
          output_t(i, j, l) = out_t;
        } else {
          out_t = input_t(i, vx_w[l], j) * vd_e[l] +
                  input_t(i, vx_e[l], j) * vd_w[l];
          output_t(i, l, j) = out_t;
        }
      }
    }
  }
}
// Backward pass of 1-D linear interpolation: each output-gradient element is
// scattered onto its two source columns with the same weights the forward
// pass used. Accumulation order matches the forward loop nesting.
template <typename T>
static void LinearInterpolationGrad(const Tensor& output_grad,
                                    Tensor* input_grad, const float ratio_w,
                                    const int in_w, const int n, const int c,
                                    const int out_w, const bool align_corners,
                                    const int align_mode,
                                    const DataLayout data_layout) {
  auto in_grad_t = EigenTensor<T, 3>::From(*input_grad);
  auto out_grad_t = EigenTensor<T, 3>::From(output_grad);
  const bool half_pixel = (align_mode == 0 && !align_corners);
  for (int ow = 0; ow < out_w; ow++) {
    int x_w = half_pixel ? static_cast<int>(ratio_w * (ow + 0.5) - 0.5)
                         : static_cast<int>(ratio_w * ow);
    if (x_w < 0) x_w = 0;                              // west neighbor
    const int x_e = (x_w < (in_w - 1)) ? (x_w + 1) : x_w;  // east neighbor
    float src_x = ratio_w * (ow + 0.5) - 0.5;
    if (src_x < 0) src_x = 0;
    const float d_w = half_pixel ? src_x - x_w : ratio_w * ow - x_w;
    const float d_e = 1.f - d_w;
    for (int b = 0; b < n; b++) {      // batches
      for (int ch = 0; ch < c; ch++) {  // channels
        if (data_layout == DataLayout::kNCHW) {
          const T g = out_grad_t(b, ch, ow);
          in_grad_t(b, ch, x_w) += static_cast<T>(g * d_e);
          in_grad_t(b, ch, x_e) += static_cast<T>(g * d_w);
        } else {
          const T g = out_grad_t(b, ow, ch);
          in_grad_t(b, x_w, ch) += static_cast<T>(g * d_e);
          in_grad_t(b, x_e, ch) += static_cast<T>(g * d_w);
        }
      }
    }
  }
}
// 2-D bilinear interpolation for a 4-D (NCHW or NHWC) tensor. Source
// row/column indices and lambda weights are precomputed per output row and
// column, then the (optionally OpenMP-collapsed) main loop blends the four
// neighboring input pixels.
template <typename T>
static void BilinearInterpolation(const Tensor& input, Tensor* output,
                                  const float ratio_h, const float ratio_w,
                                  const int in_h, const int in_w, const int n,
                                  const int c, const int out_h, const int out_w,
                                  const bool align_corners,
                                  const bool align_mode,
                                  const DataLayout data_layout) {
  auto input_t = EigenTensor<T, 4>::From(input);
  auto output_t = EigenTensor<T, 4>::From(*output);
  bool align_flag = (align_mode == 0 && !align_corners);
  std::vector<int> vy_n, vy_s;
  std::vector<float> vd_n, vd_s;
  // BUGFIX: the precompute loops write through operator[]. reserve() only
  // raises capacity and leaves size() == 0, so those stores were undefined
  // behavior; resize() creates real elements.
  vy_n.resize(out_h);
  vy_s.resize(out_h);
  vd_n.resize(out_h);
  vd_s.resize(out_h);
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int k = 0; k < out_h; k++) {
    int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5)
                         : static_cast<int>(ratio_h * k);
    y_n = (y_n > 0) ? y_n : 0;  // north neighbor, clamped
    int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1);  // south
    float idx_src_y = ratio_h * (k + 0.5) - 0.5;
    idx_src_y = (idx_src_y > 0) ? idx_src_y : 0;
    float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n;
    float d_s = 1.f - d_n;
    {
      vy_n[k] = y_n;
      vy_s[k] = y_s;
      vd_n[k] = d_n;
      vd_s[k] = d_s;
    }
  }
  std::vector<int> vx_w, vx_e;
  std::vector<float> vd_w, vd_e;
  // BUGFIX: resize, not reserve (see above).
  vx_w.resize(out_w);
  vx_e.resize(out_w);
  vd_w.resize(out_w);
  vd_e.resize(out_w);
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int l = 0; l < out_w; l++) {
    // consistency: use align_flag, which is exactly the inline expression
    // the original repeated here
    int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5)
                         : static_cast<int>(ratio_w * l);
    x_w = (x_w > 0) ? x_w : 0;  // west neighbor, clamped
    int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1);  // east
    float idx_src_x = ratio_w * (l + 0.5) - 0.5;
    idx_src_x = (idx_src_x > 0) ? idx_src_x : 0;
    float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w;
    float d_e = 1.f - d_w;
    {
      vx_w[l] = x_w;
      vx_e[l] = x_e;
      vd_w[l] = d_w;
      vd_e[l] = d_e;
    }
  }
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for collapse(4)
#endif
  for (int i = 0; i < n; i++) {          // loop for batches
    for (int j = 0; j < c; j++) {        // loop for channels
      for (int k = 0; k < out_h; k++) {  // loop for images
        for (int l = 0; l < out_w; l++) {
          // bilinear interpolation: weighted sum of the four neighbors
          T out_t;
          if (data_layout == DataLayout::kNCHW) {
            out_t = input_t(i, j, vy_n[k], vx_w[l]) * vd_s[k] * vd_e[l] +
                    input_t(i, j, vy_s[k], vx_w[l]) * vd_n[k] * vd_e[l] +
                    input_t(i, j, vy_n[k], vx_e[l]) * vd_s[k] * vd_w[l] +
                    input_t(i, j, vy_s[k], vx_e[l]) * vd_n[k] * vd_w[l];
            output_t(i, j, k, l) = out_t;
          } else {
            out_t = input_t(i, vy_n[k], vx_w[l], j) * vd_s[k] * vd_e[l] +
                    input_t(i, vy_s[k], vx_w[l], j) * vd_n[k] * vd_e[l] +
                    input_t(i, vy_n[k], vx_e[l], j) * vd_s[k] * vd_w[l] +
                    input_t(i, vy_s[k], vx_e[l], j) * vd_n[k] * vd_w[l];
            output_t(i, k, l, j) = out_t;
          }
        }
      }
    }
  }
}
// 3-D trilinear interpolation for a 5-D (NCDHW or NDHWC) tensor. Source
// indices and lambda weights along D (front/back), H (north/south) and W
// (west/east) are precomputed per output coordinate, then the (optionally
// OpenMP-collapsed) main loop blends the eight neighboring voxels.
template <typename T>
static void TrilinearInterpolation(
    const Tensor& input, Tensor* output, const float ratio_d,
    const float ratio_h, const float ratio_w, const int in_d, const int in_h,
    const int in_w, const int n, const int c, const int out_d, const int out_h,
    const int out_w, const bool align_corners, const bool align_mode,
    const DataLayout& data_layout) {
  auto input_t = EigenTensor<T, 5>::From(input);
  auto output_t = EigenTensor<T, 5>::From(*output);
  bool align_flag = (align_mode == 0 && !align_corners);
  std::vector<int> vt_f, vt_b;
  std::vector<float> vd_f, vd_b;
  // BUGFIX: all three precompute loops write through operator[]. reserve()
  // only raises capacity and leaves size() == 0, so those stores were
  // undefined behavior; resize() creates real elements.
  vt_f.resize(out_d);
  vt_b.resize(out_d);
  vd_f.resize(out_d);
  vd_b.resize(out_d);
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int j = 0; j < out_d; j++) {
    int t_f = align_flag ? static_cast<int>(ratio_d * (j + 0.5) - 0.5)
                         : static_cast<int>(ratio_d * j);
    t_f = (t_f > 0) ? t_f : 0;  // front neighbor, clamped
    int t_b = (t_f + 1) < (in_d - 1) ? (t_f + 1) : (in_d - 1);  // back
    float idx_src_t = ratio_d * (j + 0.5) - 0.5;
    idx_src_t = (idx_src_t > 0) ? idx_src_t : 0;
    float d_f = align_flag ? idx_src_t - t_f : ratio_d * j - t_f;
    float d_b = 1.f - d_f;
    {
      vt_f[j] = t_f;
      vt_b[j] = t_b;
      vd_f[j] = d_f;
      vd_b[j] = d_b;
    }
  }
  std::vector<int> vy_n, vy_s;
  std::vector<float> vd_n, vd_s;
  // BUGFIX: resize, not reserve (see above).
  vy_n.resize(out_h);
  vy_s.resize(out_h);
  vd_n.resize(out_h);
  vd_s.resize(out_h);
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int k = 0; k < out_h; k++) {
    int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5)
                         : static_cast<int>(ratio_h * k);
    y_n = (y_n > 0) ? y_n : 0;
    int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1);
    float idx_src_y = ratio_h * (k + 0.5) - 0.5;
    idx_src_y = (idx_src_y > 0) ? idx_src_y : 0;
    float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n;
    float d_s = 1.f - d_n;
    {
      vy_n[k] = y_n;
      vy_s[k] = y_s;
      vd_n[k] = d_n;
      vd_s[k] = d_s;
    }
  }
  std::vector<int> vx_w, vx_e;
  std::vector<float> vd_w, vd_e;
  // BUGFIX: resize, not reserve (see above).
  vx_w.resize(out_w);
  vx_e.resize(out_w);
  vd_w.resize(out_w);
  vd_e.resize(out_w);
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int l = 0; l < out_w; l++) {
    // consistency: use align_flag, which is exactly the inline expression
    // the original repeated here
    int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5)
                         : static_cast<int>(ratio_w * l);
    x_w = (x_w > 0) ? x_w : 0;
    int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1);
    float idx_src_x = ratio_w * (l + 0.5) - 0.5;
    idx_src_x = (idx_src_x > 0) ? idx_src_x : 0;
    float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w;
    float d_e = 1.f - d_w;
    {
      vx_w[l] = x_w;
      vx_e[l] = x_e;
      vd_w[l] = d_w;
      vd_e[l] = d_e;
    }
  }
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for collapse(5)
#endif
  for (int b = 0; b < n; b++) {          // loop for batches
    for (int i = 0; i < c; i++) {        // loop for channels
      for (int j = 0; j < out_d; j++) {  // loop for D, H, W
        for (int k = 0; k < out_h; k++) {
          for (int l = 0; l < out_w; l++) {
            // trilinear interpolation: weighted sum of the eight corners
            if (data_layout == DataLayout::kNCHW) {
              T out_t = input_t(b, i, vt_f[j], vy_n[k], vx_w[l]) * vd_b[j] *
                            vd_s[k] * vd_e[l] +
                        input_t(b, i, vt_f[j], vy_n[k], vx_e[l]) * vd_b[j] *
                            vd_s[k] * vd_w[l] +
                        input_t(b, i, vt_f[j], vy_s[k], vx_w[l]) * vd_b[j] *
                            vd_n[k] * vd_e[l] +
                        input_t(b, i, vt_f[j], vy_s[k], vx_e[l]) * vd_b[j] *
                            vd_n[k] * vd_w[l] +
                        input_t(b, i, vt_b[j], vy_n[k], vx_w[l]) * vd_f[j] *
                            vd_s[k] * vd_e[l] +
                        input_t(b, i, vt_b[j], vy_n[k], vx_e[l]) * vd_f[j] *
                            vd_s[k] * vd_w[l] +
                        input_t(b, i, vt_b[j], vy_s[k], vx_w[l]) * vd_f[j] *
                            vd_n[k] * vd_e[l] +
                        input_t(b, i, vt_b[j], vy_s[k], vx_e[l]) * vd_f[j] *
                            vd_n[k] * vd_w[l];
              output_t(b, i, j, k, l) = out_t;
            } else {
              T out_t = input_t(b, vt_f[j], vy_n[k], vx_w[l], i) * vd_b[j] *
                            vd_s[k] * vd_e[l] +
                        input_t(b, vt_f[j], vy_n[k], vx_e[l], i) * vd_b[j] *
                            vd_s[k] * vd_w[l] +
                        input_t(b, vt_f[j], vy_s[k], vx_w[l], i) * vd_b[j] *
                            vd_n[k] * vd_e[l] +
                        input_t(b, vt_f[j], vy_s[k], vx_e[l], i) * vd_b[j] *
                            vd_n[k] * vd_w[l] +
                        input_t(b, vt_b[j], vy_n[k], vx_w[l], i) * vd_f[j] *
                            vd_s[k] * vd_e[l] +
                        input_t(b, vt_b[j], vy_n[k], vx_e[l], i) * vd_f[j] *
                            vd_s[k] * vd_w[l] +
                        input_t(b, vt_b[j], vy_s[k], vx_w[l], i) * vd_f[j] *
                            vd_n[k] * vd_e[l] +
                        input_t(b, vt_b[j], vy_s[k], vx_e[l], i) * vd_f[j] *
                            vd_n[k] * vd_w[l];
              output_t(b, j, k, l, i) = out_t;
            }
          }
        }
      }
    }
  }
}
// Cubic convolution kernel for the inner taps (|x| <= 1):
// (A+2)*x^3 - (A+3)*x^2 + 1, evaluated in the same association order as the
// factored original.
template <typename T>
HOSTDEVICE inline T cubic_convolution1(T x, T A) {
  T r = (A + 2) * x - (A + 3);
  r = r * x * x;
  return r + 1;
}
// Cubic convolution kernel for the outer taps (1 < |x| <= 2):
// A*x^3 - 5A*x^2 + 8A*x - 4A, evaluated via Horner steps in the same order
// as the original expression.
template <typename T>
HOSTDEVICE inline T cubic_convolution2(T x, T A) {
  T r = A * x - 5 * A;
  r = r * x + 8 * A;
  return r * x - 4 * A;
}
// Fill coeffs[0..3] with the bicubic weights for the four taps around a
// sample at fractional offset t in [0, 1), using kernel parameter A = -0.75.
// Taps 0/3 are the outer neighbors, taps 1/2 the inner ones.
template <typename T>
HOSTDEVICE inline void get_cubic_upsample_coefficients(T coeffs[4], T t) {
  const T A = -0.75;
  // distances measured from the sample to each tap
  const T near = t;        // inner pair
  const T far = 1.0 - t;   // mirrored inner distance
  coeffs[0] = cubic_convolution2<T>(near + 1.0, A);
  coeffs[1] = cubic_convolution1<T>(near, A);
  // opposite coefficients
  coeffs[2] = cubic_convolution1<T>(far, A);
  coeffs[3] = cubic_convolution2<T>(far + 1.0, A);
}
// 1-D cubic interpolation of four consecutive samples x0..x3 at fractional
// position t (between x1 and x2): weighted sum with bicubic coefficients.
template <typename T>
static inline T cubic_interp(T x0, T x1, T x2, T x3, T t) {
  T w[4];
  get_cubic_upsample_coefficients<T>(w, t);
  return x0 * w[0] + x1 * w[1] + x2 * w[2] + x3 * w[3];
}
// 2-D bicubic interpolation for a 4-D (NCHW or NHWC) tensor. For each output
// pixel, four horizontal cubic interpolations are followed by one vertical
// cubic interpolation; source coordinates are clamped to the input bounds.
template <typename T>
static void BicubicInterpolation(const Tensor& input, Tensor* output,
                                 const float ratio_h, const float ratio_w,
                                 const int in_h, const int in_w, const int n,
                                 const int c, const int out_h, const int out_w,
                                 const bool align_corners,
                                 const DataLayout data_layout) {
  auto input_t = EigenTensor<T, 4>::From(input);
  auto output_t = EigenTensor<T, 4>::From(*output);
  for (int k = 0; k < out_h; k++) {  // loop for images
    T y_n = align_corners ? static_cast<T>(ratio_h * k)
                          : static_cast<T>(ratio_h * (k + 0.5) - 0.5);
    int input_y = floorf(y_n);
    const T y_t = y_n - input_y;  // vertical fractional offset
    for (int l = 0; l < out_w; l++) {
      T x_n = align_corners ? static_cast<T>(ratio_w * l)
                            : static_cast<T>(ratio_w * (l + 0.5) - 0.5);
      int input_x = floorf(x_n);
      const T x_t = x_n - input_x;  // horizontal fractional offset
      // PERF: the four clamped x-taps depend only on input_x, so compute
      // them once per output pixel; the original recomputed all four inside
      // every (batch, channel, ii) iteration.
      const int access_x_0 =
          std::max(std::min(input_x - 1, in_w - 1), static_cast<int>(0));
      const int access_x_1 =
          std::max(std::min(input_x + 0, in_w - 1), static_cast<int>(0));
      const int access_x_2 =
          std::max(std::min(input_x + 1, in_w - 1), static_cast<int>(0));
      const int access_x_3 =
          std::max(std::min(input_x + 2, in_w - 1), static_cast<int>(0));
      for (int i = 0; i < n; i++) {    // loop for batches
        for (int j = 0; j < c; j++) {  // loop for channels
          T coefficients[4];
          // interp 4 times in x direction
          for (int ii = 0; ii < 4; ii++) {
            int access_y = std::max(std::min(input_y - 1 + ii, in_h - 1),
                                    static_cast<int>(0));
            if (data_layout == DataLayout::kNCHW) {
              coefficients[ii] =
                  cubic_interp<T>(input_t(i, j, access_y, access_x_0),
                                  input_t(i, j, access_y, access_x_1),
                                  input_t(i, j, access_y, access_x_2),
                                  input_t(i, j, access_y, access_x_3), x_t);
            } else {
              coefficients[ii] =
                  cubic_interp<T>(input_t(i, access_y, access_x_0, j),
                                  input_t(i, access_y, access_x_1, j),
                                  input_t(i, access_y, access_x_2, j),
                                  input_t(i, access_y, access_x_3, j), x_t);
            }
          }
          // interp y direction
          if (data_layout == DataLayout::kNCHW) {
            output_t(i, j, k, l) =
                cubic_interp<T>(coefficients[0], coefficients[1],
                                coefficients[2], coefficients[3], y_t);
          } else {
            output_t(i, k, l, j) =
                cubic_interp<T>(coefficients[0], coefficients[1],
                                coefficients[2], coefficients[3], y_t);
          }
        }
      }
    }
  }
}
// Backward pass of 2-D nearest-neighbor interpolation: every output-gradient
// element is accumulated onto the single input pixel it was sampled from,
// using the same rounding rule as the forward pass.
template <typename T>
static void NearestNeighborInterpolateGrad(
    const Tensor& output_grad, Tensor* input_grad, const float ratio_h,
    const float ratio_w, const int n, const int c, const int out_h,
    const int out_w, const bool align_corners, const DataLayout data_layout) {
  auto in_grad_t = EigenTensor<T, 4>::From(*input_grad);
  auto out_grad_t = EigenTensor<T, 4>::From(output_grad);
  for (int oh = 0; oh < out_h; oh++) {  // output rows
    const int ih = align_corners ? static_cast<int>(ratio_h * oh + 0.5)
                                 : static_cast<int>(ratio_h * oh);
    for (int ow = 0; ow < out_w; ow++) {  // output columns
      const int iw = align_corners ? static_cast<int>(ratio_w * ow + 0.5)
                                   : static_cast<int>(ratio_w * ow);
      for (int b = 0; b < n; b++) {      // batches
        for (int ch = 0; ch < c; ch++) {  // channels
          if (data_layout == DataLayout::kNCHW) {
            in_grad_t(b, ch, ih, iw) += out_grad_t(b, ch, oh, ow);
          } else {
            in_grad_t(b, ih, iw, ch) += out_grad_t(b, oh, ow, ch);
          }
        }
      }
    }
  }
}
// Backward pass of 3-D nearest-neighbor interpolation: each output-gradient
// voxel is accumulated onto the single source voxel chosen by the forward
// rounding rule.
template <typename T>
static void NearestNeighbor3DInterpolateGrad(
    const Tensor& output_grad, Tensor* input_grad, const float ratio_d,
    const float ratio_h, const float ratio_w, const int n, const int c,
    const int out_d, const int out_h, const int out_w, const bool align_corners,
    const DataLayout data_layout) {
  auto in_grad_t = EigenTensor<T, 5>::From(*input_grad);
  auto out_grad_t = EigenTensor<T, 5>::From(output_grad);
  for (int od = 0; od < out_d; od++) {  // depth slices
    const int id = align_corners ? static_cast<int>(ratio_d * od + 0.5)
                                 : static_cast<int>(ratio_d * od);
    for (int oh = 0; oh < out_h; oh++) {  // rows
      const int ih = align_corners ? static_cast<int>(ratio_h * oh + 0.5)
                                   : static_cast<int>(ratio_h * oh);
      for (int ow = 0; ow < out_w; ow++) {  // columns
        const int iw = align_corners ? static_cast<int>(ratio_w * ow + 0.5)
                                     : static_cast<int>(ratio_w * ow);
        for (int b = 0; b < n; b++) {      // batches
          for (int ch = 0; ch < c; ch++) {  // channels
            if (data_layout == DataLayout::kNCHW) {
              in_grad_t(b, ch, id, ih, iw) += out_grad_t(b, ch, od, oh, ow);
            } else {
              in_grad_t(b, id, ih, iw, ch) += out_grad_t(b, od, oh, ow, ch);
            }
          }
        }
      }
    }
  }
}
// Backward pass of 2-D bilinear interpolation: each output-gradient element
// is scattered onto the four source pixels (north/south x west/east) with
// the forward pass's lambda weights. Left byte-identical: the floating-point
// `+=` accumulation order is part of the observable behavior.
template <typename T>
static void BilinearInterpolationGrad(
    const Tensor& output_grad, Tensor* input_grad, const float ratio_h,
    const float ratio_w, const int in_h, const int in_w, const int n,
    const int c, const int out_h, const int out_w, const bool align_corners,
    const int align_mode, const DataLayout data_layout) {
  auto input_grad_t = EigenTensor<T, 4>::From(*input_grad);
  auto output_grad_t = EigenTensor<T, 4>::From(output_grad);
  // half-pixel source mapping when align_mode==0 and corners not aligned
  bool align_flag = (align_mode == 0 && !align_corners);
  for (int k = 0; k < out_h; k++) {  // loop for images
    int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5)
                         : static_cast<int>(ratio_h * k);
    y_n = (y_n > 0) ? y_n : 0;  // north neighbor, clamped
    int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1);  // south
    float idx_src_y = ratio_h * (k + 0.5) - 0.5;
    idx_src_y = (idx_src_y > 0) ? idx_src_y : 0;
    float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n;
    float d_s = 1.f - d_n;
    for (int l = 0; l < out_w; l++) {
      int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5)
                           : static_cast<int>(ratio_w * l);
      x_w = (x_w > 0) ? x_w : 0;  // west neighbor, clamped
      int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1);  // east
      float idx_src_x = ratio_w * (l + 0.5) - 0.5;
      idx_src_x = (idx_src_x > 0) ? idx_src_x : 0;
      float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w;
      float d_e = 1.f - d_w;
      for (int i = 0; i < n; i++) {    // loop for batches
        for (int j = 0; j < c; j++) {  // loop for channels
          // bilinear interpolation grad
          if (data_layout == DataLayout::kNCHW) {
            const T grad = output_grad_t(i, j, k, l);
            input_grad_t(i, j, y_n, x_w) += static_cast<T>(grad * d_s * d_e);
            input_grad_t(i, j, y_s, x_w) += static_cast<T>(grad * d_n * d_e);
            input_grad_t(i, j, y_n, x_e) += static_cast<T>(grad * d_s * d_w);
            input_grad_t(i, j, y_s, x_e) += static_cast<T>(grad * d_n * d_w);
          } else {
            const T grad = output_grad_t(i, k, l, j);
            input_grad_t(i, y_n, x_w, j) += static_cast<T>(grad * d_s * d_e);
            input_grad_t(i, y_s, x_w, j) += static_cast<T>(grad * d_n * d_e);
            input_grad_t(i, y_n, x_e, j) += static_cast<T>(grad * d_s * d_w);
            input_grad_t(i, y_s, x_e, j) += static_cast<T>(grad * d_n * d_w);
          }
        }
      }
    }
  }
}
// Backward pass of 3-D trilinear interpolation: each output-gradient voxel
// is scattered onto its eight source corners (front/back x north/south x
// west/east) with the forward pass's lambda weights. Left byte-identical:
// the floating-point `+=` accumulation order is part of the observable
// behavior.
template <typename T>
static void TrilinearInterpolationGrad(
    const Tensor& output_grad, Tensor* input_grad, const float ratio_d,
    const float ratio_h, const float ratio_w, const int in_d, const int in_h,
    const int in_w, const int n, const int c, const int out_d, const int out_h,
    const int out_w, const bool align_corners, const int align_mode,
    const DataLayout data_layout) {
  auto input_grad_t = EigenTensor<T, 5>::From(*input_grad);
  auto output_grad_t = EigenTensor<T, 5>::From(output_grad);
  // half-pixel source mapping when align_mode==0 and corners not aligned
  bool align_flag = (align_mode == 0 && !align_corners);
  for (int j = 0; j < out_d; j++) {  // loop for D
    int t_f = align_flag ? static_cast<int>(ratio_d * (j + 0.5) - 0.5)
                         : static_cast<int>(ratio_d * j);
    t_f = (t_f > 0) ? t_f : 0;  // front neighbor, clamped
    int t_b = (t_f + 1) < (in_d - 1) ? (t_f + 1) : (in_d - 1);  // back
    float idx_src_t = ratio_d * (j + 0.5) - 0.5;
    idx_src_t = (idx_src_t > 0) ? idx_src_t : 0;
    float d_f = align_flag ? idx_src_t - t_f : ratio_d * j - t_f;
    float d_b = 1.f - d_f;
    for (int k = 0; k < out_h; k++) {  // loop for H
      int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5)
                           : static_cast<int>(ratio_h * k);
      y_n = (y_n > 0) ? y_n : 0;  // north neighbor, clamped
      int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1);  // south
      float idx_src_y = ratio_h * (k + 0.5) - 0.5;
      idx_src_y = (idx_src_y > 0) ? idx_src_y : 0;
      float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n;
      float d_s = 1.f - d_n;
      for (int l = 0; l < out_w; l++) {  // loop for W
        int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5)
                             : static_cast<int>(ratio_w * l);
        x_w = (x_w > 0) ? x_w : 0;  // west neighbor, clamped
        int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1);  // east
        float idx_src_x = ratio_w * (l + 0.5) - 0.5;
        idx_src_x = (idx_src_x > 0) ? idx_src_x : 0;
        float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w;
        float d_e = 1.f - d_w;
        for (int b = 0; b < n; b++) {    // loop for batches
          for (int i = 0; i < c; i++) {  // loop for channels
            // trilinear interpolation grad
            if (data_layout == DataLayout::kNCHW) {
              const T grad = output_grad_t(b, i, j, k, l);
              input_grad_t(b, i, t_f, y_n, x_w) +=
                  static_cast<T>(grad * d_b * d_s * d_e);
              input_grad_t(b, i, t_f, y_n, x_e) +=
                  static_cast<T>(grad * d_b * d_s * d_w);
              input_grad_t(b, i, t_f, y_s, x_w) +=
                  static_cast<T>(grad * d_b * d_n * d_e);
              input_grad_t(b, i, t_f, y_s, x_e) +=
                  static_cast<T>(grad * d_b * d_n * d_w);
              input_grad_t(b, i, t_b, y_n, x_w) +=
                  static_cast<T>(grad * d_f * d_s * d_e);
              input_grad_t(b, i, t_b, y_n, x_e) +=
                  static_cast<T>(grad * d_f * d_s * d_w);
              input_grad_t(b, i, t_b, y_s, x_w) +=
                  static_cast<T>(grad * d_f * d_n * d_e);
              input_grad_t(b, i, t_b, y_s, x_e) +=
                  static_cast<T>(grad * d_f * d_n * d_w);
            } else {
              const T grad = output_grad_t(b, j, k, l, i);
              input_grad_t(b, t_f, y_n, x_w, i) +=
                  static_cast<T>(grad * d_b * d_s * d_e);
              input_grad_t(b, t_f, y_n, x_e, i) +=
                  static_cast<T>(grad * d_b * d_s * d_w);
              input_grad_t(b, t_f, y_s, x_w, i) +=
                  static_cast<T>(grad * d_b * d_n * d_e);
              input_grad_t(b, t_f, y_s, x_e, i) +=
                  static_cast<T>(grad * d_b * d_n * d_w);
              input_grad_t(b, t_b, y_n, x_w, i) +=
                  static_cast<T>(grad * d_f * d_s * d_e);
              input_grad_t(b, t_b, y_n, x_e, i) +=
                  static_cast<T>(grad * d_f * d_s * d_w);
              input_grad_t(b, t_b, y_s, x_w, i) +=
                  static_cast<T>(grad * d_f * d_n * d_e);
              input_grad_t(b, t_b, y_s, x_e, i) +=
                  static_cast<T>(grad * d_f * d_n * d_w);
            }
          }
        }
      }
    }
  }
}
// Backward pass of bicubic interpolation on CPU.
// Each output-gradient element is distributed over the 4x4 neighborhood of
// input pixels that contributed in the forward pass, weighted by the cubic
// convolution coefficients computed by get_cubic_upsample_coefficients.
// Neighborhood accesses are clamped to the input bounds, so border pixels
// accumulate the weight of out-of-range taps.
//
// input_grad must be zero-initialized by the caller; gradients are
// accumulated in place. data_layout selects NCHW vs NHWC indexing.
template <typename T>
static void BicubicInterpolationGrad(const Tensor& output_grad,
                                     Tensor* input_grad, const float ratio_h,
                                     const float ratio_w, const int in_h,
                                     const int in_w, const int n, const int c,
                                     const int out_h, const int out_w,
                                     const bool align_corners,
                                     const DataLayout data_layout) {
  auto input_grad_t = EigenTensor<T, 4>::From(*input_grad);
  auto output_grad_t = EigenTensor<T, 4>::From(output_grad);
  for (int k = 0; k < out_h; k++) {  // loop for H (output rows)
    // Source y coordinate; with align_corners the mapping is linear,
    // otherwise the half-pixel convention is used.
    T y_n = align_corners ? static_cast<T>(ratio_h * k)
                          : static_cast<T>(ratio_h * (k + 0.5) - 0.5);
    int input_y = floorf(y_n);
    T y_t = y_n - input_y;  // fractional part within the source cell
    for (int l = 0; l < out_w; l++) {
      T x_n = align_corners ? static_cast<T>(ratio_w * l)
                            : static_cast<T>(ratio_w * (l + 0.5) - 0.5);
      int input_x = floorf(x_n);
      T x_t = x_n - input_x;
      T x_coeffs[4];
      T y_coeffs[4];
      get_cubic_upsample_coefficients<T>(x_coeffs, x_t);
      get_cubic_upsample_coefficients<T>(y_coeffs, y_t);
      for (int i = 0; i < n; i++) {    // loop for batches
        for (int j = 0; j < c; j++) {  // loop for channels
          // bicubic interpolation grad: scatter over the 4x4 tap window,
          // clamping tap coordinates to the valid input range.
          for (int ii = 0; ii < 4; ii++) {
            for (int jj = 0; jj < 4; jj++) {
              int access_x = std::max(std::min(input_x - 1 + ii, in_w - 1),
                                      static_cast<int>(0));
              int access_y = std::max(std::min(input_y - 1 + jj, in_h - 1),
                                      static_cast<int>(0));
              if (data_layout == DataLayout::kNCHW) {
                T grad = output_grad_t(i, j, k, l);
                input_grad_t(i, j, access_y, access_x) +=
                    grad * y_coeffs[jj] * x_coeffs[ii];
              } else {
                T grad = output_grad_t(i, k, l, j);
                input_grad_t(i, access_y, access_x, j) +=
                    grad * y_coeffs[jj] * x_coeffs[ii];
              }
            }
          }
        }
      }
    }
  }
}
// Forward pass of 1-D interpolation (currently only "linear") on CPU.
// The output width is resolved with the following precedence:
//   1. SizeTensor input (list of 1-element tensors), if present;
//   2. OutSize input tensor, if present;
//   3. the Scale input tensor or the 'scale' attribute applied to in_w;
//   4. the out_w attribute.
// When in_w == out_w the op degenerates to a plain copy.
//
// Fix over previous revision: the error messages used %d to format the
// float scale value; use %f so the reported value is meaningful.
template <typename T>
static void Interpolate1DCPUFwd(const framework::ExecutionContext& ctx,
                                const Tensor& input, Tensor* output) {
  const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
  const DataLayout data_layout = framework::StringToDataLayout(data_layout_str);
  int n, c, in_d, in_h, in_w;
  ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);
  auto interp_method = ctx.Attr<std::string>("interp_method");
  bool align_corners = ctx.Attr<bool>("align_corners");
  int align_mode = ctx.Attr<int>("align_mode");
  int out_w = ctx.Attr<int>("out_w");
  auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor");
  float scale_w = -1.;
  if (list_new_size_tensor.size() > 0) {
    // have size tensor: it takes precedence over every other size source.
    auto new_size = get_new_shape(list_new_size_tensor);
    out_w = new_size[0];
  } else {
    auto scale_tensor = ctx.Input<Tensor>("Scale");
    auto scale = ctx.Attr<std::vector<float>>("scale");
    if (scale_tensor != nullptr) {
      auto scale_data = get_new_data_from_tensor<float>(scale_tensor);
      scale_w = scale_data[0];
      PADDLE_ENFORCE_EQ(
          scale_w > 0, true,
          platform::errors::InvalidArgument(
              "The scale_w in input 'Scale' Tensor of Operator(interpolate) "
              "should be greater than 0, but received value is %f.",
              scale_w));
    } else {
      if (scale.size() > 0) {
        scale_w = scale[0];
        PADDLE_ENFORCE_EQ(
            scale_w > 0, true,
            platform::errors::InvalidArgument(
                "The scale_w in Attr(scale) of Operator(interpolate) "
                "should be greater than 0, but received value is %f.",
                scale_w));
      }
    }
    if (scale_w > 0.) {
      out_w = static_cast<int>(in_w * scale_w);
    }
    // OutSize, when provided, overrides the scale-derived width.
    auto out_size = ctx.Input<Tensor>("OutSize");
    if (out_size != nullptr) {
      auto out_size_data = get_new_data_from_tensor<int>(out_size);
      out_w = out_size_data[0];
    }
  }
  PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument(
                                  "out_w in Attr(out_shape) of Op(interpolate) "
                                  "should be greater than 0."));
  framework::DDim dim_out;
  if (data_layout == DataLayout::kNCHW) {
    dim_out = {n, c, out_w};
  } else {
    dim_out = {n, out_w, c};
  }
  output->mutable_data<T>(dim_out, ctx.GetPlace());
  // Identity resize: just copy the input through.
  if (in_w == out_w) {
    framework::TensorCopy(input, ctx.GetPlace(), output);
    return;
  }
  // ratio_w maps an output coordinate back into input space.
  float ratio_w = 0.f;
  if (out_w > 1) {
    float new_scale_w = 0.f;
    new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w)
                                : static_cast<float>(in_w) / out_w;
    ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
                              : static_cast<float>(new_scale_w);
  }
  if ("linear" == interp_method) {
    LinearInterpolation<T>(input, output, ratio_w, in_w, n, c, out_w,
                           align_corners, align_mode, data_layout);
  }
}
// Forward pass of 2-D interpolation (bilinear / nearest / bicubic) on CPU.
// The output size (out_h, out_w) is resolved with the following precedence:
//   1. SizeTensor input (list of 1-element tensors), if present;
//   2. OutSize input tensor, if present;
//   3. the Scale input tensor or the 'scale' attribute applied to the
//      input size;
//   4. the out_h / out_w attributes.
// When the output size equals the input size, the op is a plain copy.
template <typename T>
static void Interpolate2DCPUFwd(const framework::ExecutionContext& ctx,
                                const Tensor& input, Tensor* output) {
  const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
  const DataLayout data_layout = framework::StringToDataLayout(data_layout_str);
  int n, c, in_d, in_h, in_w;
  ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);
  auto interp_method = ctx.Attr<std::string>("interp_method");
  bool align_corners = ctx.Attr<bool>("align_corners");
  int align_mode = ctx.Attr<int>("align_mode");
  int out_h = ctx.Attr<int>("out_h");
  int out_w = ctx.Attr<int>("out_w");
  float scale_h = -1;
  float scale_w = -1;
  auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor");
  if (list_new_size_tensor.size() > 0) {
    // have size tensor: takes precedence over scale and OutSize.
    auto new_size = get_new_shape(list_new_size_tensor);
    out_h = new_size[0];
    out_w = new_size[1];
  } else {
    auto scale_tensor = ctx.Input<Tensor>("Scale");
    auto scale = ctx.Attr<std::vector<float>>("scale");
    if (scale_tensor != nullptr) {
      auto scale_data = get_new_data_from_tensor<float>(scale_tensor);
      // A single-element Scale tensor means the same factor for both axes.
      if (scale_data.size() > 1) {
        scale_h = scale_data[0];
        scale_w = scale_data[1];
      } else {
        scale_h = scale_data[0];
        scale_w = scale_data[0];
      }
      // NOTE(review): scale_* are floats but the message formats them with
      // %d; %f would report the actual value — confirm against
      // platform::errors formatting conventions.
      PADDLE_ENFORCE_EQ(
          scale_w > 0, true,
          platform::errors::InvalidArgument(
              "The scale_w in input 'Scale' Tensor of Operator(interpolate) "
              "should be greater than 0, but received value is %d.",
              scale_w));
      PADDLE_ENFORCE_EQ(
          scale_h > 0, true,
          platform::errors::InvalidArgument(
              "The scale_h in input 'Scale' Tensor of Operator(interpolate) "
              "should be greater than 0, but received value is %d.",
              scale_h));
    } else {
      if (scale.size() > 1) {
        scale_h = scale[0];
        scale_w = scale[1];
        PADDLE_ENFORCE_EQ(
            scale_w > 0, true,
            platform::errors::InvalidArgument(
                "The scale_w in Attr(scale) of Operator(interpolate) "
                "should be greater than 0, but received value is %d.",
                scale_w));
        PADDLE_ENFORCE_EQ(
            scale_h > 0, true,
            platform::errors::InvalidArgument(
                "The scale_h in Attr(scale) of Operator(interpolate) "
                "should be greater than 0, but received value is %d.",
                scale_h));
      }
    }
    if (scale_h > 0. && scale_w > 0.) {
      out_h = static_cast<int>(in_h * scale_h);
      out_w = static_cast<int>(in_w * scale_w);
    }
    // OutSize, when provided, overrides the scale-derived sizes.
    auto out_size = ctx.Input<Tensor>("OutSize");
    if (out_size != nullptr) {
      auto out_size_data = get_new_data_from_tensor<int>(out_size);
      out_h = out_size_data[0];
      out_w = out_size_data[1];
    }
  }
  PADDLE_ENFORCE_GT(out_h, 0, platform::errors::InvalidArgument(
                                  "out_h in Attr(out_shape) of Op(interpolate) "
                                  "should be greater than 0."));
  PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument(
                                  "out_w in Attr(out_shape) of Op(interpolate) "
                                  "should be greater than 0."));
  framework::DDim dim_out;
  if (data_layout == DataLayout::kNCHW) {
    dim_out = {n, c, out_h, out_w};
  } else {
    dim_out = {n, out_h, out_w, c};
  }
  output->mutable_data<T>(dim_out, ctx.GetPlace());
  // Identity resize: just copy the input through.
  if (in_h == out_h && in_w == out_w) {
    framework::TensorCopy(input, ctx.GetPlace(), output);
    return;
  }
  // ratio_h / ratio_w map output coordinates back into input space.
  float ratio_h = 0.f;
  float ratio_w = 0.f;
  if (out_h > 1) {
    float new_scale_h = 0.f;
    new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h)
                                : static_cast<float>(in_h) / out_h;
    ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
                              : static_cast<float>(new_scale_h);
  }
  if (out_w > 1) {
    float new_scale_w = 0.f;
    new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w)
                                : static_cast<float>(in_w) / out_w;
    ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
                              : static_cast<float>(new_scale_w);
  }
  if ("bilinear" == interp_method) {
    BilinearInterpolation<T>(input, output, ratio_h, ratio_w, in_h, in_w, n, c,
                             out_h, out_w, align_corners, align_mode,
                             data_layout);
  } else if ("nearest" == interp_method) {
    NearestNeighborInterpolate<T>(input, output, ratio_h, ratio_w, n, c, out_h,
                                  out_w, align_corners, data_layout);
  } else if ("bicubic" == interp_method) {
    BicubicInterpolation<T>(input, output, ratio_h, ratio_w, in_h, in_w, n, c,
                            out_h, out_w, align_corners, data_layout);
  }
}
// Forward pass of 3-D interpolation (trilinear / nearest) on CPU.
// The output size (out_d, out_h, out_w) is resolved with the following
// precedence:
//   1. SizeTensor input (list of 1-element tensors), if present;
//   2. OutSize input tensor, if present;
//   3. the Scale input tensor or the 'scale' attribute applied to the
//      input size;
//   4. the out_d / out_h / out_w attributes.
// When the output size equals the input size, the op is a plain copy.
template <typename T>
static void Interpolate3DCPUFwd(const framework::ExecutionContext& ctx,
                                const Tensor& input, Tensor* output) {
  const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
  const DataLayout data_layout = framework::StringToDataLayout(data_layout_str);
  int n, c, in_d, in_h, in_w;
  ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);
  auto interp_method = ctx.Attr<std::string>("interp_method");
  bool align_corners = ctx.Attr<bool>("align_corners");
  int align_mode = ctx.Attr<int>("align_mode");
  int out_d = ctx.Attr<int>("out_d");
  int out_h = ctx.Attr<int>("out_h");
  int out_w = ctx.Attr<int>("out_w");
  float scale_d = -1;
  float scale_h = -1;
  float scale_w = -1;
  auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor");
  if (list_new_size_tensor.size() > 0) {
    // have size tensor: takes precedence over scale and OutSize.
    auto new_size = get_new_shape(list_new_size_tensor);
    out_d = new_size[0];
    out_h = new_size[1];
    out_w = new_size[2];
  } else {
    auto scale_tensor = ctx.Input<Tensor>("Scale");
    auto scale = ctx.Attr<std::vector<float>>("scale");
    if (scale_tensor != nullptr) {
      auto scale_data = get_new_data_from_tensor<float>(scale_tensor);
      // A single-element Scale tensor means the same factor for all axes.
      if (scale_data.size() > 1) {
        scale_d = scale_data[0];
        scale_h = scale_data[1];
        scale_w = scale_data[2];
      } else {
        scale_d = scale_data[0];
        scale_h = scale_data[0];
        scale_w = scale_data[0];
      }
      // NOTE(review): scale_* are floats but the messages format them with
      // %d; %f would report the actual value — confirm against
      // platform::errors formatting conventions.
      PADDLE_ENFORCE_EQ(
          scale_w > 0, true,
          platform::errors::InvalidArgument(
              "The scale_w in input 'Scale' Tensor of Operator(interpolate) "
              "should be greater than 0, but received value is %d.",
              scale_w));
      PADDLE_ENFORCE_EQ(
          scale_h > 0, true,
          platform::errors::InvalidArgument(
              "The scale_h in input 'Scale' Tensor of Operator(interpolate) "
              "should be greater than 0, but received value is %d.",
              scale_h));
      PADDLE_ENFORCE_EQ(
          scale_d > 0, true,
          platform::errors::InvalidArgument(
              "The scale_d in input 'Scale' Tensor of Operator(interpolate) "
              "should be greater than 0, but received value is %d.",
              scale_d));
    } else {
      if (scale.size() > 1) {
        scale_d = scale[0];
        scale_h = scale[1];
        scale_w = scale[2];
        PADDLE_ENFORCE_EQ(
            scale_w > 0, true,
            platform::errors::InvalidArgument(
                "The scale_w in Attr(scale) of Operator(interpolate) "
                "should be greater than 0, but received value is %d.",
                scale_w));
        PADDLE_ENFORCE_EQ(
            scale_h > 0, true,
            platform::errors::InvalidArgument(
                "The scale_h in Attr(scale) of Operator(interpolate) "
                "should be greater than 0, but received value is %d.",
                scale_h));
        PADDLE_ENFORCE_EQ(
            scale_d > 0, true,
            platform::errors::InvalidArgument(
                "The scale_d in Attr(scale) of Operator(interpolate) "
                "should be greater than 0, but received value is %d.",
                scale_d));
      }
    }
    if (scale_w > 0. && scale_h > 0. && scale_d > 0.) {
      out_d = static_cast<int>(in_d * scale_d);
      out_h = static_cast<int>(in_h * scale_h);
      out_w = static_cast<int>(in_w * scale_w);
    }
    // OutSize, when provided, overrides the scale-derived sizes.
    auto out_size = ctx.Input<Tensor>("OutSize");
    if (out_size != nullptr) {
      auto out_size_data = get_new_data_from_tensor<int>(out_size);
      out_d = out_size_data[0];
      out_h = out_size_data[1];
      out_w = out_size_data[2];
    }
  }
  PADDLE_ENFORCE_GT(out_d, 0, platform::errors::InvalidArgument(
                                  "out_d in Attr(out_shape) of Op(interpolate) "
                                  "should be greater than 0."));
  PADDLE_ENFORCE_GT(out_h, 0, platform::errors::InvalidArgument(
                                  "out_h in Attr(out_shape) of Op(interpolate) "
                                  "should be greater than 0."));
  PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument(
                                  "out_w in Attr(out_shape) of Op(interpolate) "
                                  "should be greater than 0."));
  framework::DDim dim_out;
  if (data_layout == DataLayout::kNCHW) {
    dim_out = {n, c, out_d, out_h, out_w};
  } else {
    dim_out = {n, out_d, out_h, out_w, c};
  }
  output->mutable_data<T>(dim_out, ctx.GetPlace());
  // Identity resize: just copy the input through.
  if (in_d == out_d && in_h == out_h && in_w == out_w) {
    framework::TensorCopy(input, ctx.GetPlace(), output);
    return;
  }
  // ratio_* map output coordinates back into input space, per axis.
  float ratio_d = 0.f;
  float ratio_h = 0.f;
  float ratio_w = 0.f;
  if (out_d > 1) {
    float new_scale_d = 0.f;
    new_scale_d = (scale_d > 0) ? static_cast<float>(1. / scale_d)
                                : static_cast<float>(in_d) / out_d;
    ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1)
                              : static_cast<float>(new_scale_d);
  }
  if (out_h > 1) {
    float new_scale_h = 0.f;
    new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h)
                                : static_cast<float>(in_h) / out_h;
    ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
                              : static_cast<float>(new_scale_h);
  }
  if (out_w > 1) {
    float new_scale_w = 0.f;
    new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w)
                                : static_cast<float>(in_w) / out_w;
    ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
                              : static_cast<float>(new_scale_w);
  }
  if ("trilinear" == interp_method) {
    TrilinearInterpolation<T>(input, output, ratio_d, ratio_h, ratio_w, in_d,
                              in_h, in_w, n, c, out_d, out_h, out_w,
                              align_corners, align_mode, data_layout);
  } else if ("nearest" == interp_method) {
    NearestNeighbor3DInterpolate<T>(input, output, ratio_d, ratio_h, ratio_w, n,
                                    c, out_d, out_h, out_w, align_corners,
                                    data_layout);
  }
}
// Backward pass of 1-D interpolation (currently only "linear") on CPU.
// Recomputes the effective output width exactly as the forward pass did
// (SizeTensor > OutSize > Scale/scale attr > out_w attr), zero-fills the
// input gradient, and scatters the output gradient back. When in_w == out_w
// the gradient is a straight copy.
template <typename T>
static void Interpolate1DCPUBwd(const framework::ExecutionContext& ctx,
                                Tensor* input_grad, const Tensor& output_grad) {
  auto* input = ctx.Input<Tensor>("X");
  const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
  const DataLayout data_layout = framework::StringToDataLayout(data_layout_str);
  int n, c, in_d, in_h, in_w;
  ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);
  auto interp_method = ctx.Attr<std::string>("interp_method");
  bool align_corners = ctx.Attr<bool>("align_corners");
  int align_mode = ctx.Attr<int>("align_mode");
  int out_w = ctx.Attr<int>("out_w");
  float scale_w = -1.0;
  auto scale_tensor = ctx.Input<Tensor>("Scale");
  auto scale = ctx.Attr<std::vector<float>>("scale");
  if (scale_tensor != nullptr) {
    auto scale_data = get_new_data_from_tensor<float>(scale_tensor);
    scale_w = scale_data[0];
    // NOTE(review): scale_w is a float but the message formats it with %d;
    // %f would report the actual value — confirm against platform::errors
    // formatting conventions.
    PADDLE_ENFORCE_EQ(
        scale_w > 0, true,
        platform::errors::InvalidArgument(
            "The scale_w in input 'Scale' Tensor of Operator(interpolate) "
            "should be greater than 0, but received value is %d.",
            scale_w));
  } else {
    if (scale.size() > 0) {
      scale_w = scale[0];
      PADDLE_ENFORCE_EQ(
          scale_w > 0, true,
          platform::errors::InvalidArgument(
              "The scale_w in Attr(scale) of Operator(interpolate) "
              "should be greater than 0, but received value is %d.",
              scale_w));
    }
  }
  if (scale_w > 0.) {
    out_w = static_cast<int>(in_w * scale_w);
  }
  // OutSize overrides scale; SizeTensor overrides both (same precedence as
  // the forward pass).
  auto out_size = ctx.Input<Tensor>("OutSize");
  if (out_size != nullptr) {
    auto out_size_data = get_new_data_from_tensor<int>(out_size);
    out_w = out_size_data[0];
  }
  auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor");
  if (list_new_size_tensor.size() > 0) {
    // have size tensor
    auto new_size = get_new_shape(list_new_size_tensor);
    out_w = new_size[0];
  }
  framework::DDim dim_grad;
  if (data_layout == DataLayout::kNCHW) {
    dim_grad = {n, c, in_w};
  } else {
    dim_grad = {n, in_w, c};
  }
  input_grad->mutable_data<T>(dim_grad, ctx.GetPlace());
  // Zero-fill so the interpolation grad kernels can accumulate in place.
  auto& device_ctx = ctx.template device_context<platform::CPUDeviceContext>();
  phi::funcs::SetConstant<platform::CPUDeviceContext, T> zero;
  zero(device_ctx, input_grad, static_cast<T>(0.0));
  if (in_w == out_w) {
    framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad);
    return;
  }
  // ratio_w maps an output coordinate back into input space.
  float ratio_w = 0.f;
  if (out_w > 1) {
    float new_scale_w = 0.f;
    new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w)
                                : static_cast<float>(in_w) / out_w;
    ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
                              : static_cast<float>(new_scale_w);
  }
  if ("linear" == interp_method) {
    LinearInterpolationGrad<T>(output_grad, input_grad, ratio_w, in_w, n, c,
                               out_w, align_corners, align_mode, data_layout);
  }
}
// Backward pass of 2-D interpolation (bilinear / nearest / bicubic) on CPU.
// Recomputes the effective output size exactly as the forward pass did
// (SizeTensor > OutSize > Scale/scale attr > out_h/out_w attrs), zero-fills
// the input gradient, and dispatches to the per-method grad kernel. When the
// sizes match, the gradient is a straight copy.
template <typename T>
static void Interpolate2DCPUBwd(const framework::ExecutionContext& ctx,
                                Tensor* input_grad, const Tensor& output_grad) {
  auto* input = ctx.Input<Tensor>("X");
  const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
  const DataLayout data_layout = framework::StringToDataLayout(data_layout_str);
  int n, c, in_d, in_h, in_w;
  ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);
  auto interp_method = ctx.Attr<std::string>("interp_method");
  bool align_corners = ctx.Attr<bool>("align_corners");
  int align_mode = ctx.Attr<int>("align_mode");
  int out_h = ctx.Attr<int>("out_h");
  int out_w = ctx.Attr<int>("out_w");
  float scale_h = -1;
  float scale_w = -1;
  auto scale_tensor = ctx.Input<Tensor>("Scale");
  auto scale = ctx.Attr<std::vector<float>>("scale");
  if (scale_tensor != nullptr) {
    auto scale_data = get_new_data_from_tensor<float>(scale_tensor);
    // A single-element Scale tensor means the same factor for both axes.
    if (scale_data.size() > 1) {
      scale_h = scale_data[0];
      scale_w = scale_data[1];
    } else {
      scale_w = scale_data[0];
      scale_h = scale_data[0];
    }
    // NOTE(review): scale_* are floats but the messages format them with %d;
    // %f would report the actual value — confirm against platform::errors
    // formatting conventions.
    PADDLE_ENFORCE_EQ(
        scale_w > 0, true,
        platform::errors::InvalidArgument(
            "The scale_w in input 'Scale' Tensor of Operator(interpolate) "
            "should be greater than 0, but received value is %d.",
            scale_w));
    PADDLE_ENFORCE_EQ(
        scale_h > 0, true,
        platform::errors::InvalidArgument(
            "The scale_h in input 'Scale' Tensor of Operator(interpolate) "
            "should be greater than 0, but received value is %d.",
            scale_h));
  } else {
    if (scale.size() > 1) {
      scale_h = scale[0];
      scale_w = scale[1];
      PADDLE_ENFORCE_EQ(
          scale_w > 0, true,
          platform::errors::InvalidArgument(
              "The scale_w in Attr(scale) of Operator(interpolate) "
              "should be greater than 0, but received value is %d.",
              scale_w));
      PADDLE_ENFORCE_EQ(
          scale_h > 0, true,
          platform::errors::InvalidArgument(
              "The scale_h in Attr(scale) of Operator(interpolate) "
              "should be greater than 0, but received value is %d.",
              scale_h));
    }
  }
  if (scale_h > 0. && scale_w > 0.) {
    out_h = static_cast<int>(in_h * scale_h);
    out_w = static_cast<int>(in_w * scale_w);
  }
  // OutSize overrides scale; SizeTensor overrides both (same precedence as
  // the forward pass).
  auto out_size = ctx.Input<Tensor>("OutSize");
  if (out_size != nullptr) {
    auto out_size_data = get_new_data_from_tensor<int>(out_size);
    out_h = out_size_data[0];
    out_w = out_size_data[1];
  }
  auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor");
  if (list_new_size_tensor.size() > 0) {
    // have size tensor
    auto new_size = get_new_shape(list_new_size_tensor);
    out_h = new_size[0];
    out_w = new_size[1];
  }
  framework::DDim dim_grad;
  if (data_layout == DataLayout::kNCHW) {
    dim_grad = {n, c, in_h, in_w};
  } else {
    dim_grad = {n, in_h, in_w, c};
  }
  input_grad->mutable_data<T>(dim_grad, ctx.GetPlace());
  // Zero-fill so the interpolation grad kernels can accumulate in place.
  auto& device_ctx = ctx.template device_context<platform::CPUDeviceContext>();
  phi::funcs::SetConstant<platform::CPUDeviceContext, T> zero;
  zero(device_ctx, input_grad, static_cast<T>(0.0));
  if (in_h == out_h && in_w == out_w) {
    framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad);
    return;
  }
  // ratio_h / ratio_w map output coordinates back into input space.
  float ratio_h = 0.f;
  float ratio_w = 0.f;
  if (out_h > 1) {
    float new_scale_h = 0.f;
    new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h)
                                : static_cast<float>(in_h) / out_h;
    ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
                              : static_cast<float>(new_scale_h);
  }
  if (out_w > 1) {
    float new_scale_w = 0.f;
    new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w)
                                : static_cast<float>(in_w) / out_w;
    ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
                              : static_cast<float>(new_scale_w);
  }
  if ("bilinear" == interp_method) {
    BilinearInterpolationGrad<T>(output_grad, input_grad, ratio_h, ratio_w,
                                 in_h, in_w, n, c, out_h, out_w, align_corners,
                                 align_mode, data_layout);
  } else if ("nearest" == interp_method) {
    NearestNeighborInterpolateGrad<T>(output_grad, input_grad, ratio_h, ratio_w,
                                      n, c, out_h, out_w, align_corners,
                                      data_layout);
  } else if ("bicubic" == interp_method) {
    BicubicInterpolationGrad<T>(output_grad, input_grad, ratio_h, ratio_w, in_h,
                                in_w, n, c, out_h, out_w, align_corners,
                                data_layout);
  }
}
// Backward pass of 3-D interpolation (trilinear / nearest) on CPU.
// Recomputes the effective output size exactly as the forward pass did
// (SizeTensor > OutSize > Scale/scale attr > out_d/out_h/out_w attrs),
// zero-fills the input gradient, and dispatches to the per-method grad
// kernel. When the sizes match, the gradient is a straight copy.
//
// Fixes over previous revision:
//  - output_grad was taken by value (`const Tensor output_grad`), copying the
//    tensor holder on every call and diverging from the 1-D/2-D variants;
//    it is now `const Tensor&`, matching them.
//  - the float scale values are formatted with %f instead of %d in the
//    error messages.
template <typename T>
static void Interpolate3DCPUBwd(const framework::ExecutionContext& ctx,
                                Tensor* input_grad, const Tensor& output_grad) {
  auto* input = ctx.Input<Tensor>("X");
  const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
  const DataLayout data_layout = framework::StringToDataLayout(data_layout_str);
  int n, c, in_d, in_h, in_w;
  ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);
  auto interp_method = ctx.Attr<std::string>("interp_method");
  bool align_corners = ctx.Attr<bool>("align_corners");
  int align_mode = ctx.Attr<int>("align_mode");
  int out_d = ctx.Attr<int>("out_d");
  int out_h = ctx.Attr<int>("out_h");
  int out_w = ctx.Attr<int>("out_w");
  float scale_d = -1;
  float scale_h = -1;
  float scale_w = -1;
  auto scale_tensor = ctx.Input<Tensor>("Scale");
  auto scale = ctx.Attr<std::vector<float>>("scale");
  if (scale_tensor != nullptr) {
    auto scale_data = get_new_data_from_tensor<float>(scale_tensor);
    // A single-element Scale tensor means the same factor for all axes.
    if (scale_data.size() > 1) {
      scale_d = scale_data[0];
      scale_h = scale_data[1];
      scale_w = scale_data[2];
    } else {
      scale_d = scale_data[0];
      scale_h = scale_data[0];
      scale_w = scale_data[0];
    }
    PADDLE_ENFORCE_EQ(
        scale_w > 0, true,
        platform::errors::InvalidArgument(
            "The scale_w in input 'Scale' Tensor of Operator(interpolate) "
            "should be greater than 0, but received value is %f.",
            scale_w));
    PADDLE_ENFORCE_EQ(
        scale_h > 0, true,
        platform::errors::InvalidArgument(
            "The scale_h in input 'Scale' Tensor of Operator(interpolate) "
            "should be greater than 0, but received value is %f.",
            scale_h));
    PADDLE_ENFORCE_EQ(
        scale_d > 0, true,
        platform::errors::InvalidArgument(
            "The scale_d in input 'Scale' Tensor of Operator(interpolate) "
            "should be greater than 0, but received value is %f.",
            scale_d));
  } else {
    if (scale.size() > 1) {
      scale_d = scale[0];
      scale_h = scale[1];
      scale_w = scale[2];
      PADDLE_ENFORCE_EQ(
          scale_w > 0, true,
          platform::errors::InvalidArgument(
              "The scale_w in Attr(scale) of Operator(interpolate) "
              "should be greater than 0, but received value is %f.",
              scale_w));
      PADDLE_ENFORCE_EQ(
          scale_h > 0, true,
          platform::errors::InvalidArgument(
              "The scale_h in Attr(scale) of Operator(interpolate) "
              "should be greater than 0, but received value is %f.",
              scale_h));
      PADDLE_ENFORCE_EQ(
          scale_d > 0, true,
          platform::errors::InvalidArgument(
              "The scale_d in Attr(scale) of Operator(interpolate) "
              "should be greater than 0, but received value is %f.",
              scale_d));
    }
  }
  if (scale_d > 0. && scale_h > 0. && scale_w > 0.) {
    out_d = static_cast<int>(in_d * scale_d);
    out_h = static_cast<int>(in_h * scale_h);
    out_w = static_cast<int>(in_w * scale_w);
  }
  // OutSize overrides scale; SizeTensor overrides both (same precedence as
  // the forward pass).
  auto out_size = ctx.Input<Tensor>("OutSize");
  if (out_size != nullptr) {
    auto out_size_data = get_new_data_from_tensor<int>(out_size);
    out_d = out_size_data[0];
    out_h = out_size_data[1];
    out_w = out_size_data[2];
  }
  auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor");
  if (list_new_size_tensor.size() > 0) {
    // have size tensor
    auto new_size = get_new_shape(list_new_size_tensor);
    out_d = new_size[0];
    out_h = new_size[1];
    out_w = new_size[2];
  }
  framework::DDim dim_grad;
  if (data_layout == DataLayout::kNCHW) {
    dim_grad = {n, c, in_d, in_h, in_w};
  } else {
    dim_grad = {n, in_d, in_h, in_w, c};
  }
  input_grad->mutable_data<T>(dim_grad, ctx.GetPlace());
  // Zero-fill so the interpolation grad kernels can accumulate in place.
  auto& device_ctx = ctx.template device_context<platform::CPUDeviceContext>();
  phi::funcs::SetConstant<platform::CPUDeviceContext, T> zero;
  zero(device_ctx, input_grad, static_cast<T>(0.0));
  if (in_d == out_d && in_h == out_h && in_w == out_w) {
    framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad);
    return;
  }
  // ratio_* map output coordinates back into input space, per axis.
  float ratio_d = 0.f;
  float ratio_h = 0.f;
  float ratio_w = 0.f;
  if (out_d > 1) {
    float new_scale_d = 0.f;
    new_scale_d = (scale_d > 0) ? static_cast<float>(1. / scale_d)
                                : static_cast<float>(in_d) / out_d;
    ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1)
                              : static_cast<float>(new_scale_d);
  }
  if (out_h > 1) {
    float new_scale_h = 0.f;
    new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h)
                                : static_cast<float>(in_h) / out_h;
    ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
                              : static_cast<float>(new_scale_h);
  }
  if (out_w > 1) {
    float new_scale_w = 0.f;
    new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w)
                                : static_cast<float>(in_w) / out_w;
    ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
                              : static_cast<float>(new_scale_w);
  }
  if ("trilinear" == interp_method) {
    TrilinearInterpolationGrad<T>(
        output_grad, input_grad, ratio_d, ratio_h, ratio_w, in_d, in_h, in_w, n,
        c, out_d, out_h, out_w, align_corners, align_mode, data_layout);
  } else if ("nearest" == interp_method) {
    NearestNeighbor3DInterpolateGrad<T>(output_grad, input_grad, ratio_d,
                                        ratio_h, ratio_w, n, c, out_d, out_h,
                                        out_w, align_corners, data_layout);
  }
}
template <typename T>
class InterpolateV2Kernel : public framework::OpKernel<T> {
 public:
  // Dispatches to the 1-D / 2-D / 3-D forward implementation based on the
  // rank of the input tensor (3 = NCW/NWC, 4 = NCHW/NHWC, 5 = NCDHW/NDHWC).
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* in_tensor = ctx.Input<Tensor>("X");
    auto* out_tensor = ctx.Output<Tensor>("Out");
    switch (in_tensor->dims().size()) {
      case 3:  // 1D interpolation
        Interpolate1DCPUFwd<T>(ctx, *in_tensor, out_tensor);
        break;
      case 4:  // 2D interpolation
        Interpolate2DCPUFwd<T>(ctx, *in_tensor, out_tensor);
        break;
      case 5:  // 3D interpolation
        Interpolate3DCPUFwd<T>(ctx, *in_tensor, out_tensor);
        break;
      default:
        break;  // other ranks are ignored, matching prior behavior
    }
  }
};
template <typename T>
class InterpolateV2GradKernel : public framework::OpKernel<T> {
 public:
  // Dispatches to the 1-D / 2-D / 3-D backward implementation based on the
  // rank of the incoming output gradient.
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* dx = ctx.Output<Tensor>(framework::GradVarName("X"));
    auto* dout = ctx.Input<Tensor>(framework::GradVarName("Out"));
    switch (dout->dims().size()) {
      case 3:  // 1D interpolation grad
        Interpolate1DCPUBwd<T>(ctx, dx, *dout);
        break;
      case 4:  // 2D interpolation grad
        Interpolate2DCPUBwd<T>(ctx, dx, *dout);
        break;
      case 5:  // 3D interpolation grad
        Interpolate3DCPUBwd<T>(ctx, dx, *dout);
        break;
      default:
        break;  // other ranks are ignored, matching prior behavior
    }
  }
};
} // namespace operators
} // namespace paddle
|
generator_spgemm_csr_asparse.c | /******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved. *
* This file is part of the LIBXSMM library. *
* *
* For information on the license, see the LICENSE file. *
* Further information: https://github.com/hfp/libxsmm/ *
* SPDX-License-Identifier: BSD-3-Clause *
******************************************************************************/
/* Alexander Heinecke (Intel Corp.)
******************************************************************************/
#include "generator_spgemm_csr_asparse.h"
#include "generator_common.h"
#include "libxsmm_main.h"
LIBXSMM_API_INTERN
/* Emits C source code (appended to io_generated_code as a string) for a
 * sparse-times-dense GEMM kernel where A is sparse in CSR format: for each
 * stored A(m,k) the generated code accumulates A(m,k) * B(k, 0..n-1) into
 * row m of C. The sparsity pattern (i_row_idx / i_column_idx) is baked into
 * the generated code; i_values is unused because the generated kernel reads
 * A's values from its runtime argument.
 *
 * i_xgemm_desc: GEMM shape/flags descriptor (m, n, k, ldb, ldc, beta flag,
 *               alignment flags, datatype).
 * i_arch      : target architecture string; selects the simd pragma width.
 * i_row_idx   : CSR row-pointer array of A (length m+1).
 * i_column_idx: CSR column indices of A.
 * i_values    : CSR values of A (unused here).
 */
void libxsmm_generator_spgemm_csr_asparse( libxsmm_generated_code*        io_generated_code,
                                           const libxsmm_gemm_descriptor* i_xgemm_desc,
                                           const char*                    i_arch,
                                           const unsigned int*            i_row_idx,
                                           const unsigned int*            i_column_idx,
                                           const double*                  i_values ) {
  unsigned int l_m;
  unsigned int l_z;
  unsigned int l_row_elements;
  unsigned int l_flop_count = 0;
  /* scratch buffer for one emitted source line at a time */
  char l_new_code[512];
  int l_max_code_length = 511;
  int l_code_length = 0;
  LIBXSMM_UNUSED(i_values);
  l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "  unsigned int l_n = 0;\n");
  libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
  /* reset C if beta is zero */
  if (0 != (LIBXSMM_GEMM_FLAG_BETA_0 & i_xgemm_desc->flags)) { /* Beta=0 */
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "  unsigned int l_m = 0;\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "  for ( l_m = 0; l_m < %u; l_m++) {\n", (unsigned int)i_xgemm_desc->m);
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    if ( i_xgemm_desc->m > 1 ) {
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    #pragma simd\n");
      libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    #pragma vector aligned\n");
      libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    }
    /* zero-fill uses 0.0 vs 0.0f depending on the descriptor's precision */
    if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    for ( l_n = 0; l_n < %u; l_n++) { C[(l_m*%u)+l_n] = 0.0; }\n", (unsigned int)i_xgemm_desc->ldc, (unsigned int)i_xgemm_desc->ldc);
    } else {
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    for ( l_n = 0; l_n < %u; l_n++) { C[(l_m*%u)+l_n] = 0.0f; }\n", (unsigned int)i_xgemm_desc->ldc, (unsigned int)i_xgemm_desc->ldc);
    }
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "  }\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
  }
  l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "\n");
  libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
  /* determine the correct simd pragma for each architecture */
  if ( ( strcmp( i_arch, "noarch" ) == 0 ) ||
       ( strcmp( i_arch, "wsm" )    == 0 ) ||
       ( strcmp( i_arch, "snb" )    == 0 ) ||
       ( strcmp( i_arch, "hsw" )    == 0 )    ) {
    /* pre-AVX512 targets: pick the widest vectorlength that fits n */
    if ( i_xgemm_desc->n > 7 ) {
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "  #pragma simd vectorlength(8)\n");
      libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    } else if ( i_xgemm_desc->n > 3 ) {
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "  #pragma simd vectorlength(4)\n");
      libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    } else if ( i_xgemm_desc->n > 1 ) {
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "  #pragma simd vectorlength(2)\n");
      libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    } else {}
  } else if ( ( strcmp( i_arch, "knl" ) == 0 ) ||
              ( strcmp( i_arch, "knm" ) == 0 ) ||
              ( strcmp( i_arch, "skx" ) == 0 ) ||
              ( strcmp( i_arch, "clx" ) == 0 ) ||
              ( strcmp( i_arch, "cpx" ) == 0 ) ) {
    /* AVX512-capable targets */
    if ( (i_xgemm_desc->n > 1) ) {
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "  #pragma simd vectorlength(16)\n");
      libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    }
  } else {
    LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_ARCH );
    return;
  }
  if ( (i_xgemm_desc->n > 1)          &&
       ((LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0) &&
       ((LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0)    ) {
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "  #pragma vector aligned\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
  }
  /* generate the actual kernel: fully unrolled over the nonzeros of A,
   * with a single runtime loop over the n columns of B/C */
  l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "  for ( l_n = 0; l_n < %u; l_n++) {\n", (unsigned int)i_xgemm_desc->n);
  libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
  for ( l_m = 0; l_m < (unsigned int)i_xgemm_desc->m; l_m++ ) {
    l_row_elements = i_row_idx[l_m+1] - i_row_idx[l_m];
    for ( l_z = 0; l_z < l_row_elements; l_z++ ) {
      /* check k such that we just use columns which actually need to be multiplied */
      if ( i_column_idx[i_row_idx[l_m] + l_z] < (unsigned int)i_xgemm_desc->k ) {
        l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    C[%u+l_n] += A[%u] * B[%u+l_n];\n", l_m * i_xgemm_desc->ldc, i_row_idx[l_m] + l_z, i_column_idx[i_row_idx[l_m] + l_z]*i_xgemm_desc->ldb );
        libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
        l_flop_count += 2; /* one multiply + one add per stored element */
      }
    }
  }
  l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "  }\n");
  libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
  /* add flop counter (only counted in debug builds of the generated code) */
  l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "\n#ifndef NDEBUG\n#ifdef _OPENMP\n#pragma omp atomic\n#endif\nlibxsmm_num_total_flops += %u;\n#endif\n", l_flop_count * (unsigned int)i_xgemm_desc->m);
  libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
}
|
elemwise_binary_op.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2016 by Contributors
* \file elemwise_binary_op.h
* \brief Function definition of elementwise binary operators
*/
#ifndef MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
#define MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
#include <mxnet/operator_util.h>
#include <mxnet/op_attr_types.h>
#include <vector>
#include <string>
#include <utility>
#include <typeinfo>
#include <algorithm>
#include "../mxnet_op.h"
#include "../mshadow_op.h"
#include "../../engine/openmp.h"
#include "elemwise_unary_op.h"
#include "../../common/utils.h"
#include "./init_op.h"
#include "../operator_common.h"
namespace mxnet {
namespace op {
/*! Gather binary operator functions into ElemwiseBinaryOp class */
class ElemwiseBinaryOp : public OpBase {
 public:
  /*! \brief For sparse-sparse ops where an rhs row/element is missing, assume the rvalue is 0:
   *         out[i] = OP(lhs[i], 0) */
  template <typename OP, int Req>
  struct MissingRValueOp {
    typedef OP Operation;
    template <typename DType>
    MSHADOW_XINLINE static void Map(int i, DType* out, const DType* lhs) {
      KERNEL_ASSIGN(out[i], Req, OP::Map(lhs[i], DType(0)));
    }
  };
  /*! \brief For sparse-sparse ops where an lhs row/element is missing, assume the lvalue is 0:
   *         out[i] = OP(0, rhs[i]) */
  template <typename OP, int Req>
  struct MissingLValueOp {
    typedef OP Operation;
    template <typename DType>
    MSHADOW_XINLINE static void Map(int i, DType* out, const DType* rhs) {
      KERNEL_ASSIGN(out[i], Req, OP::Map(DType(0), rhs[i]));
    }
  };

 private:
  /*!
   * \brief CSR operation requires temp space
   */
  enum ResourceRequestType { kTempSpace };
  /*!
   * \brief Fill contiguous dense output rows with value computed from 0 lhs and 0 rhs input
   *        CPU-Only version
   * \param s Device stream
   * \param idx_l Index of the next lhs sparse row to be consumed
   * \param idx_r Index of the next rhs sparse row to be consumed
   * \param req Write request type for the output
   * \param out Dense 2-D output tensor (rows x row length)
   * \param iter_out First output row that has not yet been written
   * \return Index of the first output row still unwritten after the fill
   */
  template <typename DType, typename OP, typename xpu>
  static inline size_t FillDense(mshadow::Stream<xpu>* s,
                                 const size_t idx_l,
                                 const size_t idx_r,
                                 const OpReqType req,
                                 mshadow::Tensor<xpu, 2, DType>* out,
                                 const size_t iter_out) {
    const int index_out_min = static_cast<int>(std::min(idx_l, idx_r));
    if (static_cast<size_t>(index_out_min) > iter_out) {
      // OP(0, 0) is constant over the whole fill, so compute it once up front.
      const DType zero_input_val = OP::Map(DType(0), DType(0));
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
      for (int i = static_cast<int>(iter_out); i < index_out_min; ++i) {
        Fill<false>(s, (*out)[i], req, zero_input_val);
      }
    }
    return static_cast<size_t>(index_out_min);  // MSVC wants OMP loops to always use 'int'
  }
  /*! \brief True when both NDArrays refer to the same engine variable (i.e. the same data) */
  static inline bool IsSameArray(const NDArray& a1, const NDArray& a2) {
    return a1.var() == a2.var();
  }

 public:
  /*! \brief Minimum of three */
  static MSHADOW_XINLINE size_t minthree(const size_t a, const size_t b, const size_t c) {
    return a < b ? (a < c ? a : c) : (b < c ? b : c);
  }

 private:
  /*!
   * \brief Backward pass that needs no forward input: lgrad = LOP(ograd), rgrad = ROP(ograd).
   *        When LOP/ROP is identity and the request is kWriteInplace, the output must already
   *        alias ograd (verified by CHECK_EQ) and no kernel launch is needed.
   *        inputs = {ograd}; outputs = {lgrad, rgrad}.
   */
  template <typename LOP, typename ROP>
  static void BackwardUseNone_(const nnvm::NodeAttrs& attrs,
                               mshadow::Stream<cpu>* s,
                               const std::vector<TBlob>& inputs,
                               const std::vector<OpReqType>& req,
                               const std::vector<TBlob>& outputs) {
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      using namespace mxnet_op;
      // Round the element count up to a whole number of SIMD lanes.
      const size_t size = static_cast<size_t>((outputs[0].Size() + DataType<DType>::kLanes - 1) /
                                              DataType<DType>::kLanes);
      const DType* ograd_dptr = inputs[0].dptr<DType>();
      if (std::is_same<LOP, mshadow_op::identity>::value && req[0] == kWriteInplace) {
        CHECK_EQ(ograd_dptr, outputs[0].dptr<DType>());
      } else if (req[0] != kNullOp) {
        DType* lgrad_dptr = outputs[0].dptr<DType>();
        MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
          Kernel<mxnet_op::op_with_req<LOP, Req>, cpu>::Launch(s, size, lgrad_dptr, ograd_dptr);
        });
      }
      if (std::is_same<ROP, mshadow_op::identity>::value && req[1] == kWriteInplace) {
        CHECK_EQ(ograd_dptr, outputs[1].dptr<DType>());
      } else if (req[1] != kNullOp) {
        DType* rgrad_dptr = outputs[1].dptr<DType>();
        MXNET_ASSIGN_REQ_SWITCH(req[1], Req, {
          Kernel<mxnet_op::op_with_req<ROP, Req>, cpu>::Launch(s, size, rgrad_dptr, ograd_dptr);
        });
      }
    });
  }
  /*!
   * \brief Backward pass that uses the forward inputs:
   *        lgrad = ograd * LOP(lhs, rhs) and rgrad = ograd * ROP(lhs, rhs)
   *        (the multiply is fused by backward_grad_tuned).
   *        inputs = {ograd, lhs, rhs}; outputs = {lgrad, rgrad}.
   */
  template <typename LOP, typename ROP>
  static void BackwardUseIn_(const nnvm::NodeAttrs& attrs,
                             mshadow::Stream<cpu>* s,
                             const std::vector<TBlob>& inputs,
                             const std::vector<OpReqType>& req,
                             const std::vector<TBlob>& outputs) {
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      DCHECK_EQ(outputs.size(), 2U);
      DCHECK_EQ(inputs.size(), 3U);
      const DType* ograd_dptr = inputs[0].dptr<DType>();
      const DType* lhs_dptr = inputs[1].dptr<DType>();
      const DType* rhs_dptr = inputs[2].dptr<DType>();
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        const size_t size =
            static_cast<size_t>((outputs[0].Size() + mxnet_op::DataType<DType>::kLanes - 1) /
                                mxnet_op::DataType<DType>::kLanes);
        DType* lgrad_dptr = outputs[0].dptr<DType>();
        mxnet_op::Kernel<mxnet_op::op_with_req<mxnet_op::backward_grad_tuned<LOP>, Req>,
                         cpu>::Launch(s, size, lgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);
      });
      MXNET_ASSIGN_REQ_SWITCH(req[1], Req, {
        const size_t size =
            static_cast<size_t>((outputs[1].Size() + mxnet_op::DataType<DType>::kLanes - 1) /
                                mxnet_op::DataType<DType>::kLanes);
        DType* rgrad_dptr = outputs[1].dptr<DType>();
        mxnet_op::Kernel<mxnet_op::op_with_req<mxnet_op::backward_grad_tuned<ROP>, Req>,
                         cpu>::Launch(s, size, rgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);
      });
    });
  }
  /*!
   * \brief Row-sparse backward helper used by BackwardUseInEx:
   *        computes grad = OP(lhs, rhs) and then multiplies by ograd in place.
   *        inputs = {ograd, lhs, rhs}; outputs = {lgrad, rgrad}.
   * \note backup_compute is accepted for a dense fallback path but is not invoked here.
   */
  template <typename xpu,
            typename LOP,
            typename ROP,
            bool in0_ok_dense = false,
            bool in1_ok_dense = false,
            bool in2_ok_dense = false,
            typename BackupCompute>
  static inline void RspRspOpBackward(const nnvm::NodeAttrs& attrs,
                                      const OpContext& ctx,
                                      const std::vector<NDArray>& inputs,
                                      const std::vector<OpReqType>& req,
                                      const std::vector<NDArray>& outputs,
                                      BackupCompute backup_compute) {
    mshadow::Stream<xpu>* s = ctx.get_stream<xpu>();
    // lhs grad
    if (req[0] != kNullOp) {
      // RspRspOp can handle dense outputs so long as OP(0, 0) == 0
      RspRspOp<LOP>(
          s, attrs, ctx, inputs[1], inputs[2], req[0], outputs[0], false, false, false, false);
      // lhs in-place: lgrad *= ograd
      RspRspOp<op::mshadow_op::mul>(
          s, attrs, ctx, outputs[0], inputs[0], req[0], outputs[0], false, false, true, false);
    }
    // rhs grad
    if (req[1] != kNullOp) {
      RspRspOp<ROP>(
          s, attrs, ctx, inputs[1], inputs[2], req[1], outputs[1], false, false, false, false);
      // rhs in-place: rgrad *= ograd
      RspRspOp<op::mshadow_op::mul>(
          s, attrs, ctx, inputs[0], outputs[1], req[1], outputs[1], false, false, true, false);
    }
  }

 public:
  /*! \brief Binary op handling for lhs/rhs: RspDns, RspRsp, DnsRsp, or RspRsp->Dns result
   *         (CPU implementation; defined elsewhere) */
  template <typename OP>
  static void RspRspOp(mshadow::Stream<cpu>* s,
                       const nnvm::NodeAttrs& attrs,
                       const OpContext& ctx,
                       const NDArray& lhs,
                       const NDArray& rhs,
                       OpReqType req,
                       const NDArray& output,
                       bool lhs_may_be_dense,
                       bool rhs_may_be_dense,
                       bool allow_inplace,
                       bool scatter);
  /*! \brief Binary op handling for lhs/rhs: RspDns, RspRsp, DnsRsp, or RspRsp->Dns result
   *         (GPU implementation; defined elsewhere) */
  template <typename OP>
  static void RspRspOp(mshadow::Stream<gpu>* s,
                       const nnvm::NodeAttrs& attrs,
                       const OpContext& ctx,
                       const NDArray& lhs,
                       const NDArray& rhs,
                       OpReqType req,
                       const NDArray& output,
                       bool lhs_may_be_dense,
                       bool rhs_may_be_dense,
                       bool allow_inplace,
                       bool scatter);
  /*! \brief CSR -op- CSR binary operator for non-canonical NDArray (CPU) */
  template <typename OP>
  static void CsrCsrOp(mshadow::Stream<cpu>* s,
                       const nnvm::NodeAttrs& attrs,
                       const OpContext& ctx,
                       const NDArray& lhs,
                       const NDArray& rhs,
                       OpReqType req,
                       const NDArray& output);
  /*! \brief CSR -op- CSR binary operator for non-canonical NDArray (GPU) */
  template <typename OP>
  static void CsrCsrOp(mshadow::Stream<gpu>* s,
                       const nnvm::NodeAttrs& attrs,
                       const OpContext& ctx,
                       const NDArray& lhs,
                       const NDArray& rhs,
                       OpReqType req,
                       const NDArray& output);
  /*! \brief DNS -op- CSR binary operator producing a dense result (CPU).
   *         \p reverse indicates the CSR operand is the lhs. */
  template <typename OP>
  static void DnsCsrDnsOp(mshadow::Stream<cpu>* s,
                          const nnvm::NodeAttrs& attrs,
                          const OpContext& ctx,
                          const NDArray& lhs,
                          const NDArray& rhs,
                          OpReqType req,
                          const NDArray& output,
                          const bool reverse);
  /*! \brief DNS -op- CSR binary operator producing a dense result (GPU).
   *         \p reverse indicates the CSR operand is the lhs. */
  template <typename OP>
  static void DnsCsrDnsOp(mshadow::Stream<gpu>* s,
                          const nnvm::NodeAttrs& attrs,
                          const OpContext& ctx,
                          const NDArray& lhs,
                          const NDArray& rhs,
                          OpReqType req,
                          const NDArray& output,
                          const bool reverse);
  /*! \brief DNS -op- CSR binary operator producing a CSR result.
   *         \p reverse indicates the CSR operand is the lhs. */
  template <typename xpu, typename OP>
  static void DnsCsrCsrOp(const nnvm::NodeAttrs& attrs,
                          const OpContext& ctx,
                          const NDArray& lhs,
                          const NDArray& rhs,
                          OpReqType req,
                          const NDArray& output,
                          const bool reverse);
  /*! \brief DNS -op- RSP binary operator producing a dense result.
   *         \p reverse indicates the RSP operand is the lhs. */
  template <typename xpu, typename OP>
  static void DnsRspDnsOp(mshadow::Stream<xpu>* s,
                          const nnvm::NodeAttrs& attrs,
                          const OpContext& ctx,
                          const NDArray& lhs,
                          const NDArray& rhs,
                          OpReqType req,
                          const NDArray& output,
                          const bool reverse);

 public:
  /*!
   * \brief Rsp-op-Rsp operation which produces a dense result
   * \param attrs Attributes
   * \param dev_mask Device mask
   * \param dispatch_mode Dispatch Mode
   * \param in_attrs Input storage attributes
   * \param out_attrs Output storage attributes
   * \return true if handled
   */
  static bool SparseSparseWithDenseResult(const nnvm::NodeAttrs& attrs,
                                          int dev_mask,
                                          DispatchMode* dispatch_mode,
                                          std::vector<int>* in_attrs,
                                          std::vector<int>* out_attrs);
  /*!
   * \brief Allow one of the binary inputs to be dense and still produce a sparse output.
   *        Typically used for sparse * dense = sparse.
   *        Note: for csr, it dispatches to fallback other than csr, csr -> csr
   * \param attrs Attributes
   * \param dev_mask Device mask
   * \param dispatch_mode Dispatch Mode
   * \param in_attrs Input storage attributes
   * \param out_attrs Output storage attributes
   * \return true if handled
   */
  static bool PreferSparseStorageType(const nnvm::NodeAttrs& attrs,
                                      int dev_mask,
                                      DispatchMode* dispatch_mode,
                                      std::vector<int>* in_attrs,
                                      std::vector<int>* out_attrs) {
    using namespace common;
    CHECK_EQ(in_attrs->size(), 2U) << " in operator " << attrs.name;
    CHECK_EQ(out_attrs->size(), 1U) << " in operator " << attrs.name;
    const auto& lhs_stype = in_attrs->at(0);
    const auto& rhs_stype = in_attrs->at(1);
    auto& out_stype = out_attrs->at(0);
    bool dispatched = false;
    // Sparse FComputeEx kernels here are CPU-only; on other devices fall back to dense compute.
    const bool invalid_ctx = dev_mask != mshadow::cpu::kDevMask;
    const auto dispatch_ex =
        invalid_ctx ? DispatchMode::kFComputeFallback : DispatchMode::kFComputeEx;
    if (!dispatched && ContainsOnlyStorage(*in_attrs, kDefaultStorage)) {
      // dns, dns -> dns
      dispatched =
          storage_type_assign(&out_stype, kDefaultStorage, dispatch_mode, DispatchMode::kFCompute);
    }
    if (!dispatched && ContainsOnlyStorage(*in_attrs, kRowSparseStorage)) {
      // rsp, rsp -> rsp
      dispatched = storage_type_assign(&out_stype, kRowSparseStorage, dispatch_mode, dispatch_ex);
    }
    if (!dispatched && ContainsOnlyStorage(*in_attrs, kCSRStorage)) {
      // csr, csr -> csr
      dispatched = storage_type_assign(&out_stype, kCSRStorage, dispatch_mode, dispatch_ex);
    }
    if (!dispatched && ((lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) ||
                        (lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage))) {
      // rsp, dns -> rsp
      // dns, rsp -> rsp
      dispatched = storage_type_assign(&out_stype, kRowSparseStorage, dispatch_mode, dispatch_ex);
    }
    if (!dispatched && ((lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage) ||
                        (lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage))) {
      // csr, dns -> csr
      // dns, csr -> csr
      dispatched =
          storage_type_assign(&out_stype, kCSRStorage, dispatch_mode, DispatchMode::kFComputeEx);
    }
    if (!dispatched) {
      dispatched = dispatch_fallback(out_attrs, dispatch_mode);
    }
    return dispatched;
  }
  /*!
   * \brief Allow one of the inputs to be dense and produce a dense output,
   *        for rsp inputs only support when both inputs are rsp type.
   * \param attrs Attributes
   * \param dev_mask Device mask
   * \param dispatch_mode Dispatch Mode
   * \param in_attrs Input storage attributes
   * \param out_attrs Output storage attributes
   * \return true if handled
   */
  template <bool cpu_only, bool rsp, bool csr>
  static bool PreferDenseStorageType(const nnvm::NodeAttrs& attrs,
                                     const int dev_mask,
                                     DispatchMode* dispatch_mode,
                                     std::vector<int>* in_attrs,
                                     std::vector<int>* out_attrs) {
    using namespace common;
    CHECK_EQ(in_attrs->size(), 2);
    CHECK_EQ(out_attrs->size(), 1);
    const auto lhs_stype = (*in_attrs)[0];
    const auto rhs_stype = (*in_attrs)[1];
    bool dispatched = false;
    const bool invalid_ctx = cpu_only && dev_mask != mshadow::cpu::kDevMask;
    const auto dispatch_ex =
        invalid_ctx ? DispatchMode::kFComputeFallback : DispatchMode::kFComputeEx;
    if (!dispatched && ContainsOnlyStorage(*in_attrs, kDefaultStorage)) {
      // dns, dns ... -> dns
      dispatched =
          storage_type_assign(out_attrs, kDefaultStorage, dispatch_mode, DispatchMode::kFCompute);
    }
    if (!dispatched && rsp && ContainsOnlyStorage(*in_attrs, kRowSparseStorage)) {
      // rsp, rsp, ... -> rsp
      dispatched = storage_type_assign(
          out_attrs, kRowSparseStorage, dispatch_mode, DispatchMode::kFComputeEx);
    }
    if (!dispatched && csr && ContainsOnlyStorage(*in_attrs, kCSRStorage)) {
      // csr, csr, ... -> csr
      dispatched = storage_type_assign(out_attrs, kCSRStorage, dispatch_mode, dispatch_ex);
    }
    if (!dispatched && ((lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage) ||
                        (lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage))) {
      // dense, csr -> dense / csr, dense -> dense
      dispatched =
          storage_type_assign(out_attrs, kDefaultStorage, dispatch_mode, DispatchMode::kFComputeEx);
    }
    if (!dispatched && ((lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage) ||
                        (lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage))) {
      // dense, rsp -> dense / rsp, dense -> dense
      dispatched =
          storage_type_assign(out_attrs, kDefaultStorage, dispatch_mode, DispatchMode::kFComputeEx);
    }
    if (!dispatched) {
      // NOTE(review): unlike PreferSparseStorageType, the fallback result is
      // discarded and this function always returns true — confirm intentional.
      dispatch_fallback(out_attrs, dispatch_mode);
    }
    return true;
  }
  /*!
   * \brief Backward pass computing input gradient using forward inputs
   * \param attrs Attributes
   * \param dev_mask Device mask
   * \param dispatch_mode Dispatch Mode
   * \param in_attrs Input storage attributes
   * \param out_attrs Output storage attributes
   * \return true if handled
   */
  static bool BackwardUseInStorageType(const nnvm::NodeAttrs& attrs,
                                       int dev_mask,
                                       DispatchMode* dispatch_mode,
                                       std::vector<int>* in_attrs,
                                       std::vector<int>* out_attrs);
  /*! \brief Elementwise forward for integer dtypes only: out = OP(lhs, rhs) */
  template <typename xpu, typename OP>
  static void ComputeInt(const nnvm::NodeAttrs& attrs,
                         const OpContext& ctx,
                         const std::vector<TBlob>& inputs,
                         const std::vector<OpReqType>& req,
                         const std::vector<TBlob>& outputs) {
    using namespace mxnet_op;
    if (req[0] == kNullOp)
      return;
    Stream<xpu>* s = ctx.get_stream<xpu>();
    CHECK_EQ(inputs.size(), 2U);
    CHECK_EQ(outputs.size(), 1U);
    MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
      MXNET_INT_TYPE_SWITCH(outputs[0].type_flag_, DType, {
        // Round the element count up to a whole number of SIMD lanes.
        const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size()) +
                             DataType<DType>::kLanes - 1) /
                            DataType<DType>::kLanes;
        if (size != 0) {
          Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(
              s, size, outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
        }
      });
    });
  }
  /*! \brief Elementwise dense forward: out = OP(lhs, rhs). Rejects boolean outputs. */
  template <typename xpu, typename OP>
  static void Compute(const nnvm::NodeAttrs& attrs,
                      const OpContext& ctx,
                      const std::vector<TBlob>& inputs,
                      const std::vector<OpReqType>& req,
                      const std::vector<TBlob>& outputs) {
    using namespace mxnet_op;
    if (req[0] == kNullOp)
      return;
    mshadow::Stream<xpu>* s = ctx.get_stream<xpu>();
    CHECK_EQ(inputs.size(), 2U);
    CHECK_EQ(outputs.size(), 1U);
    if (outputs[0].type_flag_ == mshadow::kBool) {
      LOG(FATAL) << "Operator " << attrs.op->name << " does not support boolean type";
    }
    MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
      MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
        const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size()) +
                             DataType<DType>::kLanes - 1) /
                            DataType<DType>::kLanes;
        if (size != 0) {
          Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(
              s, size, outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
        }
      });
    });
  }
  /*! \brief Backward of a mixed-dtype unary op using the forward input:
   *         igrad = OP(ograd, input). inputs = {ograd, input}. Real dtypes only. */
  template <typename xpu, typename OP>
  static void MixedUnaryBackwardUseInCompute(const nnvm::NodeAttrs& attrs,
                                             const OpContext& ctx,
                                             const std::vector<TBlob>& inputs,
                                             const std::vector<OpReqType>& req,
                                             const std::vector<TBlob>& outputs) {
    using namespace mxnet_op;
    if (req[0] == kNullOp)
      return;
    Stream<xpu>* s = ctx.get_stream<xpu>();
    CHECK_EQ(inputs.size(), 2U);
    CHECK_EQ(outputs.size(), 1U);
    if (mxnet::common::is_int(outputs[0].type_flag_) || outputs[0].type_flag_ == mshadow::kBool) {
      LOG(FATAL) << "gradient computation of operator " << attrs.op->name << " for "
                 << mshadow::dtype_string(outputs[0].type_flag_) << " type is not supported";
    }
    MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
      MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, DType, {
        const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size()) +
                             DataType<DType>::kLanes - 1) /
                            DataType<DType>::kLanes;
        if (size != 0) {
          Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(
              s, size, outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
        }
      });
    });
  }
  /*! \brief Backward of a mixed-dtype unary op using the forward output:
   *         igrad = OP(ograd, output). inputs = {ograd, input, output} — note that
   *         inputs[2] (the forward output) is consumed, not inputs[1]. Real dtypes only. */
  template <typename xpu, typename OP>
  static void MixedUnaryBackwardUseInOutCompute(const nnvm::NodeAttrs& attrs,
                                                const OpContext& ctx,
                                                const std::vector<TBlob>& inputs,
                                                const std::vector<OpReqType>& req,
                                                const std::vector<TBlob>& outputs) {
    using namespace mxnet_op;
    if (req[0] == kNullOp)
      return;
    Stream<xpu>* s = ctx.get_stream<xpu>();
    CHECK_EQ(inputs.size(), 3U);
    CHECK_EQ(outputs.size(), 1U);
    if (mxnet::common::is_int(outputs[0].type_flag_) || outputs[0].type_flag_ == mshadow::kBool) {
      LOG(FATAL) << "gradient computation of operator " << attrs.op->name << " for "
                 << mshadow::dtype_string(outputs[0].type_flag_) << " type is not supported";
    }
    MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
      MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, DType, {
        const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[2].Size()) +
                             DataType<DType>::kLanes - 1) /
                            DataType<DType>::kLanes;
        if (size != 0) {
          Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(
              s, size, outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), inputs[2].dptr<DType>());
        }
      });
    });
  }
  /*! \brief Elementwise dense forward that also accepts boolean dtypes. */
  template <typename xpu, typename OP>
  static void ComputeWithBool(const nnvm::NodeAttrs& attrs,
                              const OpContext& ctx,
                              const std::vector<TBlob>& inputs,
                              const std::vector<OpReqType>& req,
                              const std::vector<TBlob>& outputs) {
    using namespace mxnet_op;
    if (req[0] == kNullOp)
      return;
    Stream<xpu>* s = ctx.get_stream<xpu>();
    CHECK_EQ(inputs.size(), 2U);
    CHECK_EQ(outputs.size(), 1U);
    MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
      MSHADOW_TYPE_SWITCH_WITH_BOOL(outputs[0].type_flag_, DType, {
        const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size()) +
                             DataType<DType>::kLanes - 1) /
                            DataType<DType>::kLanes;
        if (size != 0) {
          Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(
              s, size, outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
        }
      });
    });
  }
  /*! \brief Elementwise comparison/logic forward: inputs may have different dtypes,
   *         output is always boolean. */
  template <typename xpu, typename OP>
  static void ComputeLogic(const nnvm::NodeAttrs& attrs,
                           const OpContext& ctx,
                           const std::vector<TBlob>& inputs,
                           const std::vector<OpReqType>& req,
                           const std::vector<TBlob>& outputs) {
    using namespace mxnet_op;
    if (req[0] == kNullOp)
      return;
    Stream<xpu>* s = ctx.get_stream<xpu>();
    CHECK_EQ(inputs.size(), 2U);
    CHECK_EQ(outputs.size(), 1U);
    MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
      MSHADOW_TYPE_SWITCH_EXT_WITH_BOOL(inputs[0].type_flag_, DType, {
        MSHADOW_TYPE_SWITCH_EXT_WITH_BOOL(inputs[1].type_flag_, EType, {
          const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size()) +
                               DataType<DType>::kLanes - 1) /
                              DataType<DType>::kLanes;
          if (size != 0) {
            Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(
                s, size, outputs[0].dptr<bool>(), inputs[0].dptr<DType>(), inputs[1].dptr<EType>());
          }
        });
      });
    });
  }
  /*! \brief Sparse-aware forward: dispatches on input/output storage types to the
   *         appropriate rsp/csr kernel, logging an unimplemented-op message otherwise. */
  template <typename xpu, typename OP>
  static void ComputeEx(const nnvm::NodeAttrs& attrs,
                        const OpContext& ctx,
                        const std::vector<NDArray>& inputs,
                        const std::vector<OpReqType>& req,
                        const std::vector<NDArray>& outputs) {
    using namespace common;
    CHECK_EQ(inputs.size(), 2);
    CHECK_EQ(outputs.size(), 1);
    if (req[0] == kNullOp)
      return;
    const auto lhs_stype = inputs[0].storage_type();
    const auto rhs_stype = inputs[1].storage_type();
    const auto out_stype = outputs[0].storage_type();
    mshadow::Stream<xpu>* s = ctx.get_stream<xpu>();
    if ((ContainsOnlyStorage(inputs, kRowSparseStorage)) &&
        (out_stype == kRowSparseStorage || out_stype == kDefaultStorage)) {
      // rsp, rsp -> rsp
      // rsp, rsp -> dns
      RspRspOp<OP>(
          s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0], false, false, false, false);
    } else if (ContainsOnlyStorage(inputs, kCSRStorage) && out_stype == kCSRStorage) {
      // csr, csr -> csr
      CsrCsrOp<OP>(s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0]);
    } else if (((lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage) ||
                (lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage)) &&
               out_stype == kDefaultStorage) {
      const NDArray& dns = (lhs_stype == kDefaultStorage) ? inputs[0] : inputs[1];
      const NDArray& csr = (lhs_stype == kCSRStorage) ? inputs[0] : inputs[1];
      const bool reverse = (lhs_stype == kCSRStorage);
      DnsCsrDnsOp<OP>(s, attrs, ctx, dns, csr, req[0], outputs[0], reverse);
    } else if (((lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) ||
                (lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) &&
               out_stype == kDefaultStorage) {
      const NDArray& dns = (lhs_stype == kDefaultStorage) ? inputs[0] : inputs[1];
      const bool reverse = (lhs_stype == kRowSparseStorage);
      const NDArray& rsp = (reverse) ? inputs[0] : inputs[1];
      DnsRspDnsOp<xpu, OP>(s, attrs, ctx, dns, rsp, req[0], outputs[0], reverse);
    } else {
      LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
    }
  }
  /*! \brief ComputeEx allowing dense lvalue and/or rvalue */
  template <typename xpu, typename OP, bool lhs_may_be_dense, bool rhs_may_be_dense>
  static void ComputeDnsLRValueEx(const nnvm::NodeAttrs& attrs,
                                  const OpContext& ctx,
                                  const std::vector<NDArray>& inputs,
                                  const std::vector<OpReqType>& req,
                                  const std::vector<NDArray>& outputs) {
    using namespace mshadow;
    using namespace mshadow::expr;
    CHECK_EQ(inputs.size(), 2);
    CHECK_EQ(outputs.size(), 1);
    if (req[0] == kNullOp)
      return;
    const auto lhs_stype = inputs[0].storage_type();
    const auto rhs_stype = inputs[1].storage_type();
    const auto out_stype = outputs[0].storage_type();
    if ((out_stype == kRowSparseStorage || out_stype == kDefaultStorage) &&
        ((lhs_stype == kRowSparseStorage && rhs_stype == kRowSparseStorage) ||
         (lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) ||
         (lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) &&
        lhs_may_be_dense && rhs_may_be_dense) {
      // rsp, rsp -> rsp
      // rsp, rsp -> dns
      // rsp, dns -> rsp
      // dns, rsp -> rsp
      // More than once dense not allowed (this will be checked in RspRspOp):
      // rsp, dns -> dns <-- NOT ALLOWED
      // dns, rsp -> dns <-- NOT ALLOWED
      mshadow::Stream<xpu>* s = ctx.get_stream<xpu>();
      RspRspOp<OP>(s,
                   attrs,
                   ctx,
                   inputs[0],
                   inputs[1],
                   req[0],
                   outputs[0],
                   lhs_may_be_dense,
                   rhs_may_be_dense,
                   false,
                   false);
    } else if (lhs_stype == kCSRStorage && rhs_stype == kCSRStorage) {
      // csr, csr: delegate to the generic sparse dispatcher.
      ComputeEx<xpu, OP>(attrs, ctx, inputs, req, outputs);
    } else if (((lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage) ||
                (lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage)) &&
               out_stype == kCSRStorage) {
      const NDArray& dns = (lhs_stype == kDefaultStorage) ? inputs[0] : inputs[1];
      const NDArray& csr = (lhs_stype == kCSRStorage) ? inputs[0] : inputs[1];
      const bool reverse = (lhs_stype == kCSRStorage);
      DnsCsrCsrOp<xpu, OP>(attrs, ctx, dns, csr, req[0], outputs[0], reverse);
    } else {
      LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
    }
  }
  /*! \brief Dense backward needing no forward input; see BackwardUseNone_. */
  template <typename xpu, typename LOP, typename ROP>
  static inline void BackwardUseNone(const nnvm::NodeAttrs& attrs,
                                     const OpContext& ctx,
                                     const std::vector<TBlob>& inputs,
                                     const std::vector<OpReqType>& req,
                                     const std::vector<TBlob>& outputs) {
    mshadow::Stream<xpu>* s = ctx.get_stream<xpu>();
    BackwardUseNone_<LOP, ROP>(attrs, s, inputs, req, outputs);
  }
  /*! \brief Sparse backward needing no forward input. Requires the gradient op to map
   *         zero input to zero output so sparsity is preserved. */
  template <typename xpu, typename LOP, typename ROP>
  static inline void BackwardUseNoneEx(const nnvm::NodeAttrs& attrs,
                                       const OpContext& ctx,
                                       const std::vector<NDArray>& inputs,
                                       const std::vector<OpReqType>& req,
                                       const std::vector<NDArray>& outputs) {
    CHECK_EQ(inputs.size(), 1U);   // output grad
    CHECK_EQ(outputs.size(), 2U);  // lhs input grad, rhs input grad
    const auto in_stype = inputs[0].storage_type();
    const auto lhs_stype = outputs[0].storage_type();
    const auto rhs_stype = outputs[1].storage_type();
    // lhs grad
    if (req[0] != kNullOp) {
      if (in_stype == lhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
        CHECK_EQ(outputs[0].storage_type(), in_stype);
        // rsp -> rsp, _. op requires 0-input returns 0-output
        DCHECK_LT(std::fabs(static_cast<float>(LOP::Map(0))), 1e-5f);
        UnaryOp::ComputeEx<xpu, LOP>(attrs, ctx, inputs, req, {outputs[0]});
      } else {
        LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
      }
    }
    // rhs grad
    if (req[1] != kNullOp) {
      if (in_stype == rhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
        CHECK_EQ(outputs[0].storage_type(), in_stype);
        // rsp -> _, rsp. op requires 0-input returns 0-output
        DCHECK_LT(std::fabs(static_cast<float>(ROP::Map(0))), 1e-5f);
        UnaryOp::ComputeEx<xpu, ROP>(attrs, ctx, inputs, req, {outputs[1]});
      } else {
        LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
      }
    }
  }
  /*! \brief Dense backward using forward inputs; see BackwardUseIn_. */
  template <typename xpu, typename LOP, typename ROP>
  static inline void BackwardUseIn(const nnvm::NodeAttrs& attrs,
                                   const OpContext& ctx,
                                   const std::vector<TBlob>& inputs,
                                   const std::vector<OpReqType>& req,
                                   const std::vector<TBlob>& outputs) {
    mshadow::Stream<xpu>* s = ctx.get_stream<xpu>();
    BackwardUseIn_<LOP, ROP>(attrs, s, inputs, req, outputs);
  }
  /*! \brief Sparse backward using forward inputs. Only the all-row-sparse input case is
   *         implemented; anything else is a fatal error. */
  template <typename xpu, typename LOP, typename ROP>
  static inline void BackwardUseInEx(const nnvm::NodeAttrs& attrs,
                                     const OpContext& ctx,
                                     const std::vector<NDArray>& inputs,
                                     const std::vector<OpReqType>& req,
                                     const std::vector<NDArray>& outputs) {
    using namespace common;
    CHECK_EQ(inputs.size(), 3U);
    CHECK_EQ(outputs.size(), 2U);  // lhs input grad, rhs input grad
    const auto lhs_grad_stype = outputs[0].storage_type();
    const auto rhs_grad_stype = outputs[1].storage_type();
    if (ContainsOnlyStorage(inputs, kRowSparseStorage) &&
        (lhs_grad_stype == kDefaultStorage || lhs_grad_stype == kRowSparseStorage) &&
        (rhs_grad_stype == kDefaultStorage || rhs_grad_stype == kRowSparseStorage)) {
      // rsp, rsp, rsp -> [dns, rsp], [dns, rsp]
      RspRspOpBackward<xpu, LOP, ROP, false, false, false>(
          attrs, ctx, inputs, req, outputs, BackwardUseIn<xpu, LOP, ROP>);
    } else {
      LOG(FATAL) << "Not Implemented";
    }
  }
};  // class ElemwiseBinaryOp
/*! \brief Binary launch */
#define MXNET_OPERATOR_REGISTER_BINARY(name) \
NNVM_REGISTER_OP(name) \
.set_num_inputs(2) \
.set_num_outputs(1) \
.set_attr<nnvm::FListInputNames>("FListInputNames", \
[](const NodeAttrs& attrs) { \
return std::vector<std::string>{"lhs", "rhs"}; \
}) \
.set_attr<mxnet::FInferShape>("FInferShape", ElemwiseShape<2, 1>) \
.set_attr<nnvm::FInferType>("FInferType", ElemwiseType<2, 1>) \
.set_attr<nnvm::FInplaceOption>("FInplaceOption", \
[](const NodeAttrs& attrs) { \
return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}}; \
}) \
.add_argument("lhs", "NDArray-or-Symbol", "first input") \
.add_argument("rhs", "NDArray-or-Symbol", "second input")
/*! \brief Binary launch, with FComputeEx for csr and rsp available */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU(__name$, __kernel$) \
MXNET_OPERATOR_REGISTER_BINARY(__name$) \
.set_attr<FInferStorageType>("FInferStorageType", \
ElemwiseStorageType<2, 1, true, true, true>) \
.set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \
.set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \
.set_attr<FResourceRequest>( \
"FResourceRequest", /* For Sparse CSR */ \
[](const NodeAttrs& attrs) { \
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace}; \
})
/*! \brief Binary launch, with FComputeEx for csr and rsp available.
when inputs contain both sparse and dense, sparse output is preferred. */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_PS(__name$, __kernel$) \
MXNET_OPERATOR_REGISTER_BINARY(__name$) \
.set_attr<FInferStorageType>("FInferStorageType", ElemwiseBinaryOp::PreferSparseStorageType) \
.set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \
.set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \
.set_attr<FResourceRequest>( \
"FResourceRequest", /* For Sparse CSR */ \
[](const NodeAttrs& attrs) { \
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace}; \
})
/*! \brief Binary launch, dense result
* FInferStorageType attr is not set using this macro.
* By default DefaultStorageType is used.
*/
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(__name$, __kernel$) \
MXNET_OPERATOR_REGISTER_BINARY(__name$) \
.set_attr<FInferStorageType>("FInferStorageType", \
ElemwiseBinaryOp::SparseSparseWithDenseResult) \
.set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \
.set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>)
/*! \brief Binary launch, with FComputeEx for prefer dense */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_PD(__name$, __kernel$) \
MXNET_OPERATOR_REGISTER_BINARY(__name$) \
.set_attr<FInferStorageType>("FInferStorageType", \
ElemwiseBinaryOp::PreferDenseStorageType<true, true, true>) \
.set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \
.set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \
.set_attr<FResourceRequest>( \
"FResourceRequest", /* For Sparse CSR */ \
[](const NodeAttrs& attrs) { \
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace}; \
})
#if MXNET_USE_CUDA
/*! \brief Runtime-compiled (RTC) forward functor for elementwise binary ops on GPU.
 *         The operator body is selected by name at runtime. */
struct ElemwiseBinaryRTCCompute {
  std::string OP;  // name of the elementwise operation to JIT-compile
  void operator()(const nnvm::NodeAttrs& attrs,
                  const OpContext& ctx,
                  const std::vector<TBlob>& inputs,
                  const std::vector<OpReqType>& req,
                  const std::vector<TBlob>& outputs);
};
/*! \brief Runtime-compiled backward functor for ops whose gradients need no forward
 *         input (cf. ElemwiseBinaryOp::BackwardUseNone). */
struct ElemwiseBinaryRTCBwdUseNone {
  std::string LOP;  // gradient op applied to ograd for the lhs gradient
  std::string ROP;  // gradient op applied to ograd for the rhs gradient
  void operator()(const nnvm::NodeAttrs& attrs,
                  const OpContext& ctx,
                  const std::vector<TBlob>& inputs,
                  const std::vector<OpReqType>& req,
                  const std::vector<TBlob>& outputs);
};
/*! \brief Runtime-compiled backward functor for ops whose gradients use the forward
 *         inputs (cf. ElemwiseBinaryOp::BackwardUseIn). */
struct ElemwiseBinaryRTCBwdUseIn {
  std::string LOP;  // gradient op for the lhs gradient
  std::string ROP;  // gradient op for the rhs gradient
  void operator()(const nnvm::NodeAttrs& attrs,
                  const OpContext& ctx,
                  const std::vector<TBlob>& inputs,
                  const std::vector<OpReqType>& req,
                  const std::vector<TBlob>& outputs);
};
#endif
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
|
NearestNeighbour.h | //
// Created by 周泓宽 on 17.12.21.
//
#ifndef KINECT_FUSION_NEARESTNEIGHBOUR_H
#define KINECT_FUSION_NEARESTNEIGHBOUR_H
#endif //KINECT_FUSION_NEARESTNEIGHBOUR_H
#pragma once
#include <flann/flann.hpp>
#include "Eigen.h"
// A single nearest-neighbor correspondence produced by queryMatches().
struct Match {
    int idx;      // index of the matched target point; -1 when no target is within range
    float weight; // match confidence (1.0 for a valid match, 0.0 for an invalid one)
};
// Abstract interface for nearest-neighbor queries between two point clouds.
// Concrete strategies (brute force, FLANN, ...) implement buildIndex/queryMatches.
class NearestNeighborSearch {
public:
    virtual ~NearestNeighborSearch() = default;

    // Sets the maximum distance at which a query point is still considered matched.
    virtual void setMatchingMaxDistance(float maxDistance) {
        m_maxDistance = maxDistance;
    }

    // Prepares the search structure over the target point cloud.
    virtual void buildIndex(const std::vector<Eigen::Vector3f>& targetPoints) = 0;
    // Finds, for every query point, its closest target point (or an invalid match).
    virtual std::vector<Match> queryMatches(const std::vector<Vector3f>& transformedPoints) = 0;

protected:
    NearestNeighborSearch() = default;

    // Matching cutoff in the same units as the input points (default: 5 mm).
    float m_maxDistance = 0.005f;
};
/**
* Brute-force nearest neighbor search.
*/
class NearestNeighborSearchBruteForce : public NearestNeighborSearch {
public:
    NearestNeighborSearchBruteForce() = default;

    // Brute force needs no acceleration structure; just keep a copy of the targets.
    void buildIndex(const std::vector<Eigen::Vector3f>& targetPoints) override {
        m_points = targetPoints;
    }

    // Scans every target point for every query point (O(queries * targets)).
    std::vector<Match> queryMatches(const std::vector<Vector3f>& transformedPoints) override {
        const unsigned nMatches = transformedPoints.size();
        const unsigned nTargetPoints = m_points.size();
        std::vector<Match> matches(nMatches);
        std::cout << "nMatches: " << nMatches << std::endl;
        std::cout << "nTargetPoints: " << nTargetPoints << std::endl;
        // Each query is independent, so the scan parallelizes trivially.
        #pragma omp parallel for
        for (int queryIdx = 0; queryIdx < (int)nMatches; queryIdx++) {
            matches[queryIdx] = getClosestPoint(transformedPoints[queryIdx]);
        }
        return matches;
    }

private:
    std::vector<Eigen::Vector3f> m_points;

    // Linear scan for the nearest target; yields an invalid match (idx == -1)
    // when the closest target is farther away than m_maxDistance.
    Match getClosestPoint(const Vector3f& p) {
        int bestIdx = -1;
        float bestDist = std::numeric_limits<float>::max();
        for (unsigned int candidate = 0; candidate < m_points.size(); ++candidate) {
            const float dist = (p - m_points[candidate]).norm();
            if (dist < bestDist) {
                bestDist = dist;
                bestIdx = candidate;
            }
        }
        return (bestDist <= m_maxDistance) ? Match{ bestIdx, 1.f } : Match{ -1, 0.f };
    }
};
/**
* Nearest neighbor search using FLANN.
*/
/**
 * Nearest neighbor search using FLANN (randomized kd-trees).
 * buildIndex() flattens the target cloud into a float array (FLANN requires
 * contiguous data) and builds the index; queryMatches() runs a 1-NN search.
 */
class NearestNeighborSearchFlann : public NearestNeighborSearch {
public:
    NearestNeighborSearchFlann() :
        NearestNeighborSearch(),
        m_nTrees{ 1 },
        m_index{ nullptr },
        m_flatPoints{ nullptr }
    { }

    ~NearestNeighborSearchFlann() override {
        releaseIndex();
    }

    void buildIndex(const std::vector<Eigen::Vector3f>& targetPoints) override {
        std::cout << "Initializing FLANN index with " << targetPoints.size() << " points." << std::endl;

        // Rebuilding must not leak a previously built index / point buffer.
        releaseIndex();

        // FLANN requires that all the points be flat. Therefore we copy the points to a separate flat array.
        m_flatPoints = new float[targetPoints.size() * 3];
        for (size_t pointIndex = 0; pointIndex < targetPoints.size(); pointIndex++) {
            for (size_t dim = 0; dim < 3; dim++) {
                m_flatPoints[pointIndex * 3 + dim] = targetPoints[pointIndex][dim];
            }
        }

        flann::Matrix<float> dataset(m_flatPoints, targetPoints.size(), 3);

        // Building the index takes some time.
        m_index = new flann::Index<flann::L2<float>>(dataset, flann::KDTreeIndexParams(m_nTrees));
        m_index->buildIndex();

        std::cout << "FLANN index created." << std::endl;
    }

    std::vector<Match> queryMatches(const std::vector<Vector3f>& transformedPoints) override {
        if (!m_index) {
            std::cout << "FLANN index needs to be build before querying any matches." << std::endl;
            return {};
        }

        // FLANN requires that all the points be flat. Therefore we copy the points to a separate flat array.
        auto* queryPoints = new float[transformedPoints.size() * 3];
        for (size_t pointIndex = 0; pointIndex < transformedPoints.size(); pointIndex++) {
            for (size_t dim = 0; dim < 3; dim++) {
                queryPoints[pointIndex * 3 + dim] = transformedPoints[pointIndex][dim];
            }
        }

        flann::Matrix<float> query(queryPoints, transformedPoints.size(), 3);
        flann::Matrix<int> indices(new int[query.rows * 1], query.rows, 1);
        flann::Matrix<float> distances(new float[query.rows * 1], query.rows, 1);

        // Do a knn search, searching for 1 nearest point and using 16 checks.
        flann::SearchParams searchParams{ 16 };
        searchParams.cores = 0; // let FLANN pick the number of threads
        m_index->knnSearch(query, indices, distances, 1, searchParams);

        // Filter the matches.
        // NOTE(review): flann::L2 reports *squared* L2 distances, while the
        // brute-force search compares the plain norm against m_maxDistance.
        // Consider m_maxDistance * m_maxDistance here — verify intended semantics.
        const size_t nMatches = transformedPoints.size();
        std::vector<Match> matches;
        matches.reserve(nMatches);

        for (int i = 0; i < (int)nMatches; ++i) {
            if (*distances[i] <= m_maxDistance)
                matches.push_back(Match{ *indices[i], 1.f });
            else
                matches.push_back(Match{ -1, 0.f });
        }

        // Release the memory.
        delete[] query.ptr();
        delete[] indices.ptr();
        delete[] distances.ptr();

        return matches;
    }

private:
    // Frees the index and the flat point buffer; safe to call repeatedly.
    void releaseIndex() {
        delete m_index; // single object -> plain delete
        m_index = nullptr;
        delete[] m_flatPoints; // allocated with new[] -> delete[] (original used plain delete: UB)
        m_flatPoints = nullptr;
    }

    int m_nTrees; // number of randomized kd-trees used by FLANN
    flann::Index<flann::L2<float>>* m_index; // owned; built by buildIndex()
    float* m_flatPoints; // owned flat copy of the target points (x,y,z triplets)
};
|
reduction-clause.c | /*
$ gcc -fopenmp -O2 reduction-clause.c -o reduction-clause
*/
#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif
/* Entry point: fills a[] with 0..n-1 (n from argv[1], clamped to 20) and sums
 * it with an OpenMP parallel reduction, then prints the result. */
int main(int argc, char **argv) {
    /* a[] is a fixed-size buffer; n is clamped to its capacity below.
     * (The original declared a VLA `a[n]` sized *before* n was read from
     * argv — it only worked because of the `n = 20` initializer.) */
    int i, n = 20, a[20], suma = 0;
    if (argc < 2) {
        fprintf(stderr, "Falta iteraciones\n");
        exit(-1);
    }
    n = atoi(argv[1]);
    if (n > 20) {
        n = 20;
        printf("n=%d\n", n);
    }
    for (i = 0; i < n; i++)
        a[i] = i;
    /* reduction(+:suma): inside the parallel region each thread accumulates
     * into its own private copy of `suma`; on exit the partial sums are added
     * back into the shared variable, which is again visible to all. */
    #pragma omp parallel for reduction(+:suma)
    for (i = 0; i < n; i++)
        suma += a[i];
    printf("Tras 'parallel' suma=%d\n", suma);
    return 0;
}
MarchingCubeComponent.h | #pragma once
#include "ECS.h"
#include "glm/gtx/dual_quaternion.hpp"
#include "glm/gtx/string_cast.hpp"
#include "utils.h"
#include <array>
#include <cstdint>
#include "DrawableComponent.h"
// One marching-cubes grid cell: the 8 corner positions and the scalar field
// value accumulated at each corner.
struct Cell
{
glm::vec3 points[8]; // corner positions, ordered per the marching-cubes convention
float val[8]; // accumulated scalar value at each corner (reset every frame)
};
// Marching-cubes mesh extraction over a regular 3D grid of scalar values.
// Each frame, update() polygonizes the grid at iso-level 0.5 and pushes the
// resulting triangles (with finite-difference normals) into the entity's
// DrawableComponent, then clears the grid for the next frame.
class MarchingCubeComponent : public Component
{
public:
    // __xsize/__ysize/__zsize: world-space extents of the grid (centered on
    // the origin); __cellSize: edge length of one cubic cell.
    MarchingCubeComponent(float __xsize, float __ysize, float __zsize, float __cellSize)
        : Component{}
        , _xsize { __xsize }
        , _ysize { __ysize }
        , _zsize { __zsize }
        , _cellSize { __cellSize }
    {}

    // Accumulates __inside into corner __i of cell (__x,__y,__z).
    // NOTE(review): indices are not bounds-checked — callers must stay inside
    // the dimensions built by init().
    inline void changeGrid(std::uint64_t __x, std::uint64_t __y, std::uint64_t __z, std::uint64_t __i, float __inside)
    {
        _grid[__x][__y][__z].val[__i] += __inside;
    }

    // Registers a scalar field function; used only for normal estimation.
    inline void addFunc(std::function<float(glm::vec3)> __f)
    {
        _fs.emplace_back(__f);
    }

    void init() override
    {
        // Grid initialization loop in middle of cell
        for (float x = -_xsize/2; x < _xsize/2; x += _cellSize)
        {
            std::vector<std::vector<Cell>> grid2D;
            for (float y = -_ysize/2; y < _ysize/2; y += _cellSize)
            {
                std::vector<Cell> grid1D;
                for (float z = -_zsize/2; z < _zsize/2; z += _cellSize)
                {
                    Cell cell;
                    float s = _cellSize/2;
                    // Corners ordered per the standard marching-cubes layout.
                    cell.points[0] = {x-s,y+s,z-s};
                    cell.points[1] = {x+s,y+s,z-s};
                    cell.points[2] = {x+s,y-s,z-s};
                    cell.points[3] = {x-s,y-s,z-s};
                    cell.points[4] = {x-s,y+s,z+s};
                    cell.points[5] = {x+s,y+s,z+s};
                    cell.points[6] = {x+s,y-s,z+s};
                    cell.points[7] = {x-s,y-s,z+s};
                    for (std::uint64_t i = 0; i < 8; ++i)
                    {
                        cell.val[i] = 0.0f;
                    }
                    grid1D.emplace_back(cell);
                }
                grid2D.emplace_back(grid1D);
            }
            _grid.emplace_back(grid2D);
        }
    }

    // Estimates one normal per triangle vertex by a backward finite difference
    // of the summed field functions. NOTE(review): if _fs is empty the
    // accumulated gradient is zero and glm::normalize yields NaNs — confirm
    // callers always register at least one field before update().
    inline std::array<glm::vec3, 3> _computeVertNormal(std::array<glm::vec3, 3> __p, float __eps) const
    {
        std::array<glm::vec3, 3> n {};
        for (const auto &f : _fs)
        {
            for (std::uint8_t i = 0; i < 3; ++i)
            {
                float fp = f(__p[i]);
                n[i].x += f(glm::vec3{__p[i].x-__eps, __p[i].y, __p[i].z}) - fp;
                n[i].y += f(glm::vec3{__p[i].x, __p[i].y-__eps, __p[i].z}) - fp;
                n[i].z += f(glm::vec3{__p[i].x, __p[i].y, __p[i].z-__eps}) - fp;
            }
        }
        for (std::uint8_t i = 0; i < 3; ++i)
            n[i] = glm::normalize(n[i]);
        return n;
    }

    // Polygonizes every cell, uploads the mesh, then resets the grid.
    void update([[maybe_unused]] double __deltaTime) override
    {
        std::vector<GLfloat> vertices;
        std::vector<GLfloat> normals;
        std::vector<GLuint> indices;
        for (std::uint64_t x = 0; x < _grid.size(); ++x)
        {
            for (std::uint64_t y = 0; y < _grid[x].size(); ++y)
            {
                for (std::uint64_t z = 0; z < _grid[x][y].size(); ++z)
                {
                    std::uint64_t cubeIndex = _getCubeIndex(_grid[x][y][z].val);
                    std::array<glm::vec3, 12> vertList = _getVertList(cubeIndex, _grid[x][y][z].points, _grid[x][y][z].val);
                    // Each triple in _triTable names the edges of one triangle;
                    // -1 terminates the list for this cube configuration.
                    for (std::uint64_t i = 0; _triTable[cubeIndex][i] != -1; i += 3)
                    {
                        std::uint64_t oldSize = vertices.size();
                        std::array<glm::vec3, 3> p;
                        p[0] = vertList[_triTable[cubeIndex][i]];
                        p[1] = vertList[_triTable[cubeIndex][i+1]];
                        p[2] = vertList[_triTable[cubeIndex][i+2]];
                        // glm::vec3 is 3 contiguous floats, so the triangle is
                        // appended with a single 9-float memcpy.
                        vertices.resize(oldSize+9);
                        memcpy(&vertices[oldSize], &p, sizeof(float)*9);
                        std::array<glm::vec3, 3> n = _computeVertNormal(p, 0.01f);
                        normals.resize(oldSize+9); // Vert and Norm vectors are the same size
                        memcpy(&normals[oldSize], &n, sizeof(float)*9);
                    }
                }
            }
        }
        auto& drawable = entity->getComponent<DrawableComponent>();
        drawable.setVertices(vertices);
        ASSERT(vertices.size()%3==0, "vertices size must be a multiple of 3");
        // Triangles are not shared between cells: indices are just 0..N-1.
        indices.resize(vertices.size()/3);
        std::iota(indices.begin(), indices.end(), 0);
        drawable.setIndices(indices);
        drawable.setNormals(normals);
        drawable.updateGeometry();
        _cleanGrid();
    }

    std::vector<std::vector<std::vector<Cell>>>& grid() { return _grid; }

private:
    const float _xsize;
    const float _ysize;
    const float _zsize;
    const float _cellSize;
    std::vector<std::vector<std::vector<Cell>>> _grid;
    std::vector<std::function<float(glm::vec3)>> _fs; // field functions, cleared each frame

    // Resets all corner values to zero and drops the registered field functions.
    inline void _cleanGrid()
    {
        // Reset all cells
        #pragma omp parallel for
        for (std::uint64_t x = 0; x < _grid.size(); ++x)
        {
            for (std::uint64_t y = 0; y < _grid[x].size(); ++y)
            {
                for (std::uint64_t z = 0; z < _grid[x][y].size(); ++z)
                {
                    for (std::uint8_t i = 0; i < 8; ++i)
                    {
                        _grid[x][y][z].val[i] = 0.0f;
                    }
                }
            }
        }
        _fs.clear();
    }

    // Packs the 8 corner inside/outside tests (iso-level 0.5) into one byte;
    // bit i is set when corner i is *below* the iso value.
    static inline std::uint64_t _getCubeIndex(float __val[8])
    {
        std::uint64_t cubeIndex = 0;
        if (__val[0] < 0.5f) cubeIndex |= 1;
        if (__val[1] < 0.5f) cubeIndex |= 2;
        if (__val[2] < 0.5f) cubeIndex |= 4;
        if (__val[3] < 0.5f) cubeIndex |= 8;
        if (__val[4] < 0.5f) cubeIndex |= 16;
        if (__val[5] < 0.5f) cubeIndex |= 32;
        if (__val[6] < 0.5f) cubeIndex |= 64;
        if (__val[7] < 0.5f) cubeIndex |= 128;
        return cubeIndex;
    }

    // Strict lexicographic ordering on (x, y, z); used to make edge
    // interpolation symmetric regardless of endpoint order.
    static inline bool check(const glm::vec3 &left, const glm::vec3 &right)
    {
        if (left.x < right.x)
            return true;
        else if (left.x > right.x)
            return false;
        if (left.y < right.y)
            return true;
        else if (left.y > right.y)
            return false;
        if (left.z < right.z)
            return true;
        else if (left.z > right.z)
            return false;
        return false;
    }

    // Linearly interpolates the iso-level-0.5 crossing on the edge (p1, p2).
    // Endpoints are canonically ordered first so both traversal directions
    // produce bit-identical vertices (avoids cracks between adjacent cells).
    static inline glm::vec3 _vInterp(glm::vec3 __p1, glm::vec3 __p2, float __valp1, float __valp2)
    {
        if (check(__p2, __p1))
        {
            glm::vec3 temp = __p1;
            __p1 = __p2;
            __p2 = temp;
            float temp2 = __valp1;
            __valp1 = __valp2;
            __valp2 = temp2;
        }
        glm::vec3 p;
        // Guard against division by (near-)zero when both values coincide.
        if(fabs(__valp1 - __valp2) > 0.00001)
            p = __p1 + (__p2 - __p1)/(__valp2 - __valp1)*(0.5f - __valp1);
        else
            p = __p1;
        return p;
    }

    // Computes the interpolated vertex for every edge crossed by the surface.
    // Entries for edges not flagged in _edgeTable stay default and are never
    // read by update(). (The original took __cubeIndex as *float* and used it
    // directly as an array subscript — now an integral index.)
    static inline std::array<glm::vec3, 12> _getVertList(std::uint64_t __cubeIndex, glm::vec3 __p[8], float __val[8])
    {
        std::array<glm::vec3, 12> vl;
        if (_edgeTable[__cubeIndex] & 1) vl[0] = _vInterp(__p[0], __p[1], __val[0], __val[1]);
        if (_edgeTable[__cubeIndex] & 2) vl[1] = _vInterp(__p[1], __p[2], __val[1], __val[2]);
        if (_edgeTable[__cubeIndex] & 4) vl[2] = _vInterp(__p[2], __p[3], __val[2], __val[3]);
        if (_edgeTable[__cubeIndex] & 8) vl[3] = _vInterp(__p[3], __p[0], __val[3], __val[0]);
        if (_edgeTable[__cubeIndex] & 16) vl[4] = _vInterp(__p[4], __p[5], __val[4], __val[5]);
        if (_edgeTable[__cubeIndex] & 32) vl[5] = _vInterp(__p[5], __p[6], __val[5], __val[6]);
        if (_edgeTable[__cubeIndex] & 64) vl[6] = _vInterp(__p[6], __p[7], __val[6], __val[7]);
        if (_edgeTable[__cubeIndex] & 128) vl[7] = _vInterp(__p[7], __p[4], __val[7], __val[4]);
        if (_edgeTable[__cubeIndex] & 256) vl[8] = _vInterp(__p[0], __p[4], __val[0], __val[4]);
        if (_edgeTable[__cubeIndex] & 512) vl[9] = _vInterp(__p[1], __p[5], __val[1], __val[5]);
        if (_edgeTable[__cubeIndex] & 1024) vl[10] = _vInterp(__p[2], __p[6], __val[2], __val[6]);
        if (_edgeTable[__cubeIndex] & 2048) vl[11] = _vInterp(__p[3], __p[7], __val[3], __val[7]);
        return vl;
    }

    // Classic marching-cubes edge table: bit k set means edge k is crossed.
    constexpr static std::array<std::uint16_t, 256> _edgeTable =
    {
        0x0 , 0x109, 0x203, 0x30a, 0x406, 0x50f, 0x605, 0x70c,
        0x80c, 0x905, 0xa0f, 0xb06, 0xc0a, 0xd03, 0xe09, 0xf00,
        0x190, 0x99 , 0x393, 0x29a, 0x596, 0x49f, 0x795, 0x69c,
        0x99c, 0x895, 0xb9f, 0xa96, 0xd9a, 0xc93, 0xf99, 0xe90,
        0x230, 0x339, 0x33 , 0x13a, 0x636, 0x73f, 0x435, 0x53c,
        0xa3c, 0xb35, 0x83f, 0x936, 0xe3a, 0xf33, 0xc39, 0xd30,
        0x3a0, 0x2a9, 0x1a3, 0xaa , 0x7a6, 0x6af, 0x5a5, 0x4ac,
        0xbac, 0xaa5, 0x9af, 0x8a6, 0xfaa, 0xea3, 0xda9, 0xca0,
        0x460, 0x569, 0x663, 0x76a, 0x66 , 0x16f, 0x265, 0x36c,
        0xc6c, 0xd65, 0xe6f, 0xf66, 0x86a, 0x963, 0xa69, 0xb60,
        0x5f0, 0x4f9, 0x7f3, 0x6fa, 0x1f6, 0xff , 0x3f5, 0x2fc,
        0xdfc, 0xcf5, 0xfff, 0xef6, 0x9fa, 0x8f3, 0xbf9, 0xaf0,
        0x650, 0x759, 0x453, 0x55a, 0x256, 0x35f, 0x55 , 0x15c,
        0xe5c, 0xf55, 0xc5f, 0xd56, 0xa5a, 0xb53, 0x859, 0x950,
        0x7c0, 0x6c9, 0x5c3, 0x4ca, 0x3c6, 0x2cf, 0x1c5, 0xcc ,
        0xfcc, 0xec5, 0xdcf, 0xcc6, 0xbca, 0xac3, 0x9c9, 0x8c0,
        0x8c0, 0x9c9, 0xac3, 0xbca, 0xcc6, 0xdcf, 0xec5, 0xfcc,
        0xcc , 0x1c5, 0x2cf, 0x3c6, 0x4ca, 0x5c3, 0x6c9, 0x7c0,
        0x950, 0x859, 0xb53, 0xa5a, 0xd56, 0xc5f, 0xf55, 0xe5c,
        0x15c, 0x55 , 0x35f, 0x256, 0x55a, 0x453, 0x759, 0x650,
        0xaf0, 0xbf9, 0x8f3, 0x9fa, 0xef6, 0xfff, 0xcf5, 0xdfc,
        0x2fc, 0x3f5, 0xff , 0x1f6, 0x6fa, 0x7f3, 0x4f9, 0x5f0,
        0xb60, 0xa69, 0x963, 0x86a, 0xf66, 0xe6f, 0xd65, 0xc6c,
        0x36c, 0x265, 0x16f, 0x66 , 0x76a, 0x663, 0x569, 0x460,
        0xca0, 0xda9, 0xea3, 0xfaa, 0x8a6, 0x9af, 0xaa5, 0xbac,
        0x4ac, 0x5a5, 0x6af, 0x7a6, 0xaa , 0x1a3, 0x2a9, 0x3a0,
        0xd30, 0xc39, 0xf33, 0xe3a, 0x936, 0x83f, 0xb35, 0xa3c,
        0x53c, 0x435, 0x73f, 0x636, 0x13a, 0x33 , 0x339, 0x230,
        0xe90, 0xf99, 0xc93, 0xd9a, 0xa96, 0xb9f, 0x895, 0x99c,
        0x69c, 0x795, 0x49f, 0x596, 0x29a, 0x393, 0x99 , 0x190,
        0xf00, 0xe09, 0xd03, 0xc0a, 0xb06, 0xa0f, 0x905, 0x80c,
        0x70c, 0x605, 0x50f, 0x406, 0x30a, 0x203, 0x109, 0x0
    };

    // Classic marching-cubes triangle table: per cube configuration, triples
    // of edge indices terminated by -1. Stored as std::int8_t because the
    // `!= -1` sentinel test would never fire on ABIs where plain char is
    // unsigned (e.g. ARM) — the original used `char`.
    constexpr static std::array<std::array<std::int8_t, 16>, 256> _triTable =
    {{
        {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {0, 1, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {1, 8, 3, 9, 8, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {0, 8, 3, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {9, 2, 10, 0, 2, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {2, 8, 3, 2, 10, 8, 10, 9, 8, -1, -1, -1, -1, -1, -1, -1},
        {3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {0, 11, 2, 8, 11, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {1, 9, 0, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {1, 11, 2, 1, 9, 11, 9, 8, 11, -1, -1, -1, -1, -1, -1, -1},
        {3, 10, 1, 11, 10, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {0, 10, 1, 0, 8, 10, 8, 11, 10, -1, -1, -1, -1, -1, -1, -1},
        {3, 9, 0, 3, 11, 9, 11, 10, 9, -1, -1, -1, -1, -1, -1, -1},
        {9, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {4, 3, 0, 7, 3, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {0, 1, 9, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {4, 1, 9, 4, 7, 1, 7, 3, 1, -1, -1, -1, -1, -1, -1, -1},
        {1, 2, 10, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {3, 4, 7, 3, 0, 4, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1},
        {9, 2, 10, 9, 0, 2, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1},
        {2, 10, 9, 2, 9, 7, 2, 7, 3, 7, 9, 4, -1, -1, -1, -1},
        {8, 4, 7, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {11, 4, 7, 11, 2, 4, 2, 0, 4, -1, -1, -1, -1, -1, -1, -1},
        {9, 0, 1, 8, 4, 7, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1},
        {4, 7, 11, 9, 4, 11, 9, 11, 2, 9, 2, 1, -1, -1, -1, -1},
        {3, 10, 1, 3, 11, 10, 7, 8, 4, -1, -1, -1, -1, -1, -1, -1},
        {1, 11, 10, 1, 4, 11, 1, 0, 4, 7, 11, 4, -1, -1, -1, -1},
        {4, 7, 8, 9, 0, 11, 9, 11, 10, 11, 0, 3, -1, -1, -1, -1},
        {4, 7, 11, 4, 11, 9, 9, 11, 10, -1, -1, -1, -1, -1, -1, -1},
        {9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {9, 5, 4, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {0, 5, 4, 1, 5, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {8, 5, 4, 8, 3, 5, 3, 1, 5, -1, -1, -1, -1, -1, -1, -1},
        {1, 2, 10, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {3, 0, 8, 1, 2, 10, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1},
        {5, 2, 10, 5, 4, 2, 4, 0, 2, -1, -1, -1, -1, -1, -1, -1},
        {2, 10, 5, 3, 2, 5, 3, 5, 4, 3, 4, 8, -1, -1, -1, -1},
        {9, 5, 4, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {0, 11, 2, 0, 8, 11, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1},
        {0, 5, 4, 0, 1, 5, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1},
        {2, 1, 5, 2, 5, 8, 2, 8, 11, 4, 8, 5, -1, -1, -1, -1},
        {10, 3, 11, 10, 1, 3, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1},
        {4, 9, 5, 0, 8, 1, 8, 10, 1, 8, 11, 10, -1, -1, -1, -1},
        {5, 4, 0, 5, 0, 11, 5, 11, 10, 11, 0, 3, -1, -1, -1, -1},
        {5, 4, 8, 5, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1},
        {9, 7, 8, 5, 7, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {9, 3, 0, 9, 5, 3, 5, 7, 3, -1, -1, -1, -1, -1, -1, -1},
        {0, 7, 8, 0, 1, 7, 1, 5, 7, -1, -1, -1, -1, -1, -1, -1},
        {1, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {9, 7, 8, 9, 5, 7, 10, 1, 2, -1, -1, -1, -1, -1, -1, -1},
        {10, 1, 2, 9, 5, 0, 5, 3, 0, 5, 7, 3, -1, -1, -1, -1},
        {8, 0, 2, 8, 2, 5, 8, 5, 7, 10, 5, 2, -1, -1, -1, -1},
        {2, 10, 5, 2, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1},
        {7, 9, 5, 7, 8, 9, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1},
        {9, 5, 7, 9, 7, 2, 9, 2, 0, 2, 7, 11, -1, -1, -1, -1},
        {2, 3, 11, 0, 1, 8, 1, 7, 8, 1, 5, 7, -1, -1, -1, -1},
        {11, 2, 1, 11, 1, 7, 7, 1, 5, -1, -1, -1, -1, -1, -1, -1},
        {9, 5, 8, 8, 5, 7, 10, 1, 3, 10, 3, 11, -1, -1, -1, -1},
        {5, 7, 0, 5, 0, 9, 7, 11, 0, 1, 0, 10, 11, 10, 0, -1},
        {11, 10, 0, 11, 0, 3, 10, 5, 0, 8, 0, 7, 5, 7, 0, -1},
        {11, 10, 5, 7, 11, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {0, 8, 3, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {9, 0, 1, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {1, 8, 3, 1, 9, 8, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1},
        {1, 6, 5, 2, 6, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {1, 6, 5, 1, 2, 6, 3, 0, 8, -1, -1, -1, -1, -1, -1, -1},
        {9, 6, 5, 9, 0, 6, 0, 2, 6, -1, -1, -1, -1, -1, -1, -1},
        {5, 9, 8, 5, 8, 2, 5, 2, 6, 3, 2, 8, -1, -1, -1, -1},
        {2, 3, 11, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {11, 0, 8, 11, 2, 0, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1},
        {0, 1, 9, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1},
        {5, 10, 6, 1, 9, 2, 9, 11, 2, 9, 8, 11, -1, -1, -1, -1},
        {6, 3, 11, 6, 5, 3, 5, 1, 3, -1, -1, -1, -1, -1, -1, -1},
        {0, 8, 11, 0, 11, 5, 0, 5, 1, 5, 11, 6, -1, -1, -1, -1},
        {3, 11, 6, 0, 3, 6, 0, 6, 5, 0, 5, 9, -1, -1, -1, -1},
        {6, 5, 9, 6, 9, 11, 11, 9, 8, -1, -1, -1, -1, -1, -1, -1},
        {5, 10, 6, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {4, 3, 0, 4, 7, 3, 6, 5, 10, -1, -1, -1, -1, -1, -1, -1},
        {1, 9, 0, 5, 10, 6, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1},
        {10, 6, 5, 1, 9, 7, 1, 7, 3, 7, 9, 4, -1, -1, -1, -1},
        {6, 1, 2, 6, 5, 1, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1},
        {1, 2, 5, 5, 2, 6, 3, 0, 4, 3, 4, 7, -1, -1, -1, -1},
        {8, 4, 7, 9, 0, 5, 0, 6, 5, 0, 2, 6, -1, -1, -1, -1},
        {7, 3, 9, 7, 9, 4, 3, 2, 9, 5, 9, 6, 2, 6, 9, -1},
        {3, 11, 2, 7, 8, 4, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1},
        {5, 10, 6, 4, 7, 2, 4, 2, 0, 2, 7, 11, -1, -1, -1, -1},
        {0, 1, 9, 4, 7, 8, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1},
        {9, 2, 1, 9, 11, 2, 9, 4, 11, 7, 11, 4, 5, 10, 6, -1},
        {8, 4, 7, 3, 11, 5, 3, 5, 1, 5, 11, 6, -1, -1, -1, -1},
        {5, 1, 11, 5, 11, 6, 1, 0, 11, 7, 11, 4, 0, 4, 11, -1},
        {0, 5, 9, 0, 6, 5, 0, 3, 6, 11, 6, 3, 8, 4, 7, -1},
        {6, 5, 9, 6, 9, 11, 4, 7, 9, 7, 11, 9, -1, -1, -1, -1},
        {10, 4, 9, 6, 4, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {4, 10, 6, 4, 9, 10, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1},
        {10, 0, 1, 10, 6, 0, 6, 4, 0, -1, -1, -1, -1, -1, -1, -1},
        {8, 3, 1, 8, 1, 6, 8, 6, 4, 6, 1, 10, -1, -1, -1, -1},
        {1, 4, 9, 1, 2, 4, 2, 6, 4, -1, -1, -1, -1, -1, -1, -1},
        {3, 0, 8, 1, 2, 9, 2, 4, 9, 2, 6, 4, -1, -1, -1, -1},
        {0, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {8, 3, 2, 8, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1},
        {10, 4, 9, 10, 6, 4, 11, 2, 3, -1, -1, -1, -1, -1, -1, -1},
        {0, 8, 2, 2, 8, 11, 4, 9, 10, 4, 10, 6, -1, -1, -1, -1},
        {3, 11, 2, 0, 1, 6, 0, 6, 4, 6, 1, 10, -1, -1, -1, -1},
        {6, 4, 1, 6, 1, 10, 4, 8, 1, 2, 1, 11, 8, 11, 1, -1},
        {9, 6, 4, 9, 3, 6, 9, 1, 3, 11, 6, 3, -1, -1, -1, -1},
        {8, 11, 1, 8, 1, 0, 11, 6, 1, 9, 1, 4, 6, 4, 1, -1},
        {3, 11, 6, 3, 6, 0, 0, 6, 4, -1, -1, -1, -1, -1, -1, -1},
        {6, 4, 8, 11, 6, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {7, 10, 6, 7, 8, 10, 8, 9, 10, -1, -1, -1, -1, -1, -1, -1},
        {0, 7, 3, 0, 10, 7, 0, 9, 10, 6, 7, 10, -1, -1, -1, -1},
        {10, 6, 7, 1, 10, 7, 1, 7, 8, 1, 8, 0, -1, -1, -1, -1},
        {10, 6, 7, 10, 7, 1, 1, 7, 3, -1, -1, -1, -1, -1, -1, -1},
        {1, 2, 6, 1, 6, 8, 1, 8, 9, 8, 6, 7, -1, -1, -1, -1},
        {2, 6, 9, 2, 9, 1, 6, 7, 9, 0, 9, 3, 7, 3, 9, -1},
        {7, 8, 0, 7, 0, 6, 6, 0, 2, -1, -1, -1, -1, -1, -1, -1},
        {7, 3, 2, 6, 7, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {2, 3, 11, 10, 6, 8, 10, 8, 9, 8, 6, 7, -1, -1, -1, -1},
        {2, 0, 7, 2, 7, 11, 0, 9, 7, 6, 7, 10, 9, 10, 7, -1},
        {1, 8, 0, 1, 7, 8, 1, 10, 7, 6, 7, 10, 2, 3, 11, -1},
        {11, 2, 1, 11, 1, 7, 10, 6, 1, 6, 7, 1, -1, -1, -1, -1},
        {8, 9, 6, 8, 6, 7, 9, 1, 6, 11, 6, 3, 1, 3, 6, -1},
        {0, 9, 1, 11, 6, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {7, 8, 0, 7, 0, 6, 3, 11, 0, 11, 6, 0, -1, -1, -1, -1},
        {7, 11, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {3, 0, 8, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {0, 1, 9, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {8, 1, 9, 8, 3, 1, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1},
        {10, 1, 2, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {1, 2, 10, 3, 0, 8, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1},
        {2, 9, 0, 2, 10, 9, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1},
        {6, 11, 7, 2, 10, 3, 10, 8, 3, 10, 9, 8, -1, -1, -1, -1},
        {7, 2, 3, 6, 2, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {7, 0, 8, 7, 6, 0, 6, 2, 0, -1, -1, -1, -1, -1, -1, -1},
        {2, 7, 6, 2, 3, 7, 0, 1, 9, -1, -1, -1, -1, -1, -1, -1},
        {1, 6, 2, 1, 8, 6, 1, 9, 8, 8, 7, 6, -1, -1, -1, -1},
        {10, 7, 6, 10, 1, 7, 1, 3, 7, -1, -1, -1, -1, -1, -1, -1},
        {10, 7, 6, 1, 7, 10, 1, 8, 7, 1, 0, 8, -1, -1, -1, -1},
        {0, 3, 7, 0, 7, 10, 0, 10, 9, 6, 10, 7, -1, -1, -1, -1},
        {7, 6, 10, 7, 10, 8, 8, 10, 9, -1, -1, -1, -1, -1, -1, -1},
        {6, 8, 4, 11, 8, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {3, 6, 11, 3, 0, 6, 0, 4, 6, -1, -1, -1, -1, -1, -1, -1},
        {8, 6, 11, 8, 4, 6, 9, 0, 1, -1, -1, -1, -1, -1, -1, -1},
        {9, 4, 6, 9, 6, 3, 9, 3, 1, 11, 3, 6, -1, -1, -1, -1},
        {6, 8, 4, 6, 11, 8, 2, 10, 1, -1, -1, -1, -1, -1, -1, -1},
        {1, 2, 10, 3, 0, 11, 0, 6, 11, 0, 4, 6, -1, -1, -1, -1},
        {4, 11, 8, 4, 6, 11, 0, 2, 9, 2, 10, 9, -1, -1, -1, -1},
        {10, 9, 3, 10, 3, 2, 9, 4, 3, 11, 3, 6, 4, 6, 3, -1},
        {8, 2, 3, 8, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1},
        {0, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {1, 9, 0, 2, 3, 4, 2, 4, 6, 4, 3, 8, -1, -1, -1, -1},
        {1, 9, 4, 1, 4, 2, 2, 4, 6, -1, -1, -1, -1, -1, -1, -1},
        {8, 1, 3, 8, 6, 1, 8, 4, 6, 6, 10, 1, -1, -1, -1, -1},
        {10, 1, 0, 10, 0, 6, 6, 0, 4, -1, -1, -1, -1, -1, -1, -1},
        {4, 6, 3, 4, 3, 8, 6, 10, 3, 0, 3, 9, 10, 9, 3, -1},
        {10, 9, 4, 6, 10, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {4, 9, 5, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {0, 8, 3, 4, 9, 5, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1},
        {5, 0, 1, 5, 4, 0, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1},
        {11, 7, 6, 8, 3, 4, 3, 5, 4, 3, 1, 5, -1, -1, -1, -1},
        {9, 5, 4, 10, 1, 2, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1},
        {6, 11, 7, 1, 2, 10, 0, 8, 3, 4, 9, 5, -1, -1, -1, -1},
        {7, 6, 11, 5, 4, 10, 4, 2, 10, 4, 0, 2, -1, -1, -1, -1},
        {3, 4, 8, 3, 5, 4, 3, 2, 5, 10, 5, 2, 11, 7, 6, -1},
        {7, 2, 3, 7, 6, 2, 5, 4, 9, -1, -1, -1, -1, -1, -1, -1},
        {9, 5, 4, 0, 8, 6, 0, 6, 2, 6, 8, 7, -1, -1, -1, -1},
        {3, 6, 2, 3, 7, 6, 1, 5, 0, 5, 4, 0, -1, -1, -1, -1},
        {6, 2, 8, 6, 8, 7, 2, 1, 8, 4, 8, 5, 1, 5, 8, -1},
        {9, 5, 4, 10, 1, 6, 1, 7, 6, 1, 3, 7, -1, -1, -1, -1},
        {1, 6, 10, 1, 7, 6, 1, 0, 7, 8, 7, 0, 9, 5, 4, -1},
        {4, 0, 10, 4, 10, 5, 0, 3, 10, 6, 10, 7, 3, 7, 10, -1},
        {7, 6, 10, 7, 10, 8, 5, 4, 10, 4, 8, 10, -1, -1, -1, -1},
        {6, 9, 5, 6, 11, 9, 11, 8, 9, -1, -1, -1, -1, -1, -1, -1},
        {3, 6, 11, 0, 6, 3, 0, 5, 6, 0, 9, 5, -1, -1, -1, -1},
        {0, 11, 8, 0, 5, 11, 0, 1, 5, 5, 6, 11, -1, -1, -1, -1},
        {6, 11, 3, 6, 3, 5, 5, 3, 1, -1, -1, -1, -1, -1, -1, -1},
        {1, 2, 10, 9, 5, 11, 9, 11, 8, 11, 5, 6, -1, -1, -1, -1},
        {0, 11, 3, 0, 6, 11, 0, 9, 6, 5, 6, 9, 1, 2, 10, -1},
        {11, 8, 5, 11, 5, 6, 8, 0, 5, 10, 5, 2, 0, 2, 5, -1},
        {6, 11, 3, 6, 3, 5, 2, 10, 3, 10, 5, 3, -1, -1, -1, -1},
        {5, 8, 9, 5, 2, 8, 5, 6, 2, 3, 8, 2, -1, -1, -1, -1},
        {9, 5, 6, 9, 6, 0, 0, 6, 2, -1, -1, -1, -1, -1, -1, -1},
        {1, 5, 8, 1, 8, 0, 5, 6, 8, 3, 8, 2, 6, 2, 8, -1},
        {1, 5, 6, 2, 1, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {1, 3, 6, 1, 6, 10, 3, 8, 6, 5, 6, 9, 8, 9, 6, -1},
        {10, 1, 0, 10, 0, 6, 9, 5, 0, 5, 6, 0, -1, -1, -1, -1},
        {0, 3, 8, 5, 6, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {10, 5, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {11, 5, 10, 7, 5, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {11, 5, 10, 11, 7, 5, 8, 3, 0, -1, -1, -1, -1, -1, -1, -1},
        {5, 11, 7, 5, 10, 11, 1, 9, 0, -1, -1, -1, -1, -1, -1, -1},
        {10, 7, 5, 10, 11, 7, 9, 8, 1, 8, 3, 1, -1, -1, -1, -1},
        {11, 1, 2, 11, 7, 1, 7, 5, 1, -1, -1, -1, -1, -1, -1, -1},
        {0, 8, 3, 1, 2, 7, 1, 7, 5, 7, 2, 11, -1, -1, -1, -1},
        {9, 7, 5, 9, 2, 7, 9, 0, 2, 2, 11, 7, -1, -1, -1, -1},
        {7, 5, 2, 7, 2, 11, 5, 9, 2, 3, 2, 8, 9, 8, 2, -1},
        {2, 5, 10, 2, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1},
        {8, 2, 0, 8, 5, 2, 8, 7, 5, 10, 2, 5, -1, -1, -1, -1},
        {9, 0, 1, 5, 10, 3, 5, 3, 7, 3, 10, 2, -1, -1, -1, -1},
        {9, 8, 2, 9, 2, 1, 8, 7, 2, 10, 2, 5, 7, 5, 2, -1},
        {1, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {0, 8, 7, 0, 7, 1, 1, 7, 5, -1, -1, -1, -1, -1, -1, -1},
        {9, 0, 3, 9, 3, 5, 5, 3, 7, -1, -1, -1, -1, -1, -1, -1},
        {9, 8, 7, 5, 9, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {5, 8, 4, 5, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1},
        {5, 0, 4, 5, 11, 0, 5, 10, 11, 11, 3, 0, -1, -1, -1, -1},
        {0, 1, 9, 8, 4, 10, 8, 10, 11, 10, 4, 5, -1, -1, -1, -1},
        {10, 11, 4, 10, 4, 5, 11, 3, 4, 9, 4, 1, 3, 1, 4, -1},
        {2, 5, 1, 2, 8, 5, 2, 11, 8, 4, 5, 8, -1, -1, -1, -1},
        {0, 4, 11, 0, 11, 3, 4, 5, 11, 2, 11, 1, 5, 1, 11, -1},
        {0, 2, 5, 0, 5, 9, 2, 11, 5, 4, 5, 8, 11, 8, 5, -1},
        {9, 4, 5, 2, 11, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {2, 5, 10, 3, 5, 2, 3, 4, 5, 3, 8, 4, -1, -1, -1, -1},
        {5, 10, 2, 5, 2, 4, 4, 2, 0, -1, -1, -1, -1, -1, -1, -1},
        {3, 10, 2, 3, 5, 10, 3, 8, 5, 4, 5, 8, 0, 1, 9, -1},
        {5, 10, 2, 5, 2, 4, 1, 9, 2, 9, 4, 2, -1, -1, -1, -1},
        {8, 4, 5, 8, 5, 3, 3, 5, 1, -1, -1, -1, -1, -1, -1, -1},
        {0, 4, 5, 1, 0, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {8, 4, 5, 8, 5, 3, 9, 0, 5, 0, 3, 5, -1, -1, -1, -1},
        {9, 4, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {4, 11, 7, 4, 9, 11, 9, 10, 11, -1, -1, -1, -1, -1, -1, -1},
        {0, 8, 3, 4, 9, 7, 9, 11, 7, 9, 10, 11, -1, -1, -1, -1},
        {1, 10, 11, 1, 11, 4, 1, 4, 0, 7, 4, 11, -1, -1, -1, -1},
        {3, 1, 4, 3, 4, 8, 1, 10, 4, 7, 4, 11, 10, 11, 4, -1},
        {4, 11, 7, 9, 11, 4, 9, 2, 11, 9, 1, 2, -1, -1, -1, -1},
        {9, 7, 4, 9, 11, 7, 9, 1, 11, 2, 11, 1, 0, 8, 3, -1},
        {11, 7, 4, 11, 4, 2, 2, 4, 0, -1, -1, -1, -1, -1, -1, -1},
        {11, 7, 4, 11, 4, 2, 8, 3, 4, 3, 2, 4, -1, -1, -1, -1},
        {2, 9, 10, 2, 7, 9, 2, 3, 7, 7, 4, 9, -1, -1, -1, -1},
        {9, 10, 7, 9, 7, 4, 10, 2, 7, 8, 7, 0, 2, 0, 7, -1},
        {3, 7, 10, 3, 10, 2, 7, 4, 10, 1, 10, 0, 4, 0, 10, -1},
        {1, 10, 2, 8, 7, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {4, 9, 1, 4, 1, 7, 7, 1, 3, -1, -1, -1, -1, -1, -1, -1},
        {4, 9, 1, 4, 1, 7, 0, 8, 1, 8, 7, 1, -1, -1, -1, -1},
        {4, 0, 3, 7, 4, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {4, 8, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {9, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {3, 0, 9, 3, 9, 11, 11, 9, 10, -1, -1, -1, -1, -1, -1, -1},
        {0, 1, 10, 0, 10, 8, 8, 10, 11, -1, -1, -1, -1, -1, -1, -1},
        {3, 1, 10, 11, 3, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {1, 2, 11, 1, 11, 9, 9, 11, 8, -1, -1, -1, -1, -1, -1, -1},
        {3, 0, 9, 3, 9, 11, 1, 2, 9, 2, 11, 9, -1, -1, -1, -1},
        {0, 2, 11, 8, 0, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {3, 2, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {2, 3, 8, 2, 8, 10, 10, 8, 9, -1, -1, -1, -1, -1, -1, -1},
        {9, 10, 2, 0, 9, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {2, 3, 8, 2, 8, 10, 0, 1, 8, 1, 10, 8, -1, -1, -1, -1},
        {1, 10, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {1, 3, 8, 9, 1, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {0, 9, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {0, 3, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
        {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}
    }};
};
|
GB_unop__identity_fp64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fp64_fc64)
// op(A') function: GB (_unop_tran__identity_fp64_fc64)
// C type: double
// A type: GxB_FC64_t
// cast: double cij = (double) creal (aij)
// unaryop: cij = aij
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
double z = (double) creal (aij) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = (double) creal (aij) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the IDENTITY unary op elementwise: Cx [p] = (double) creal (Ax [p]),
// i.e. keep the real part of each complex entry and drop the imaginary part.
GrB_Info GB (_unop_apply__identity_fp64_fc64)
(
double *Cx, // Cx and Ax may be aliased
const GxB_FC64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz, // number of entries to process
int nthreads // number of OpenMP threads to use
)
{
#if GB_DISABLE
// this type/op combination was disabled at compile time; fall back to generic
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// full/sparse case: every one of the anz entries is present
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC64_t aij = Ax [p] ;
double z = (double) creal (aij) ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the bitmap
if (!Ab [p]) continue ;
GxB_FC64_t aij = Ax [p] ;
double z = (double) creal (aij) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply the identity operator.
// The loop body is generated by the shared template GB_unop_transpose.c,
// which expands the GB_* macros defined earlier in this file.
GrB_Info GB (_unop_tran__identity_fp64_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces, // per-thread workspaces for the transpose
const int64_t *restrict A_slice, // how A is partitioned across threads
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
kernel.h |
/*
__kernel void pgain_kernel(
__global Point_Struct *p,
__global float *coord_h,
__global float * work_mem_h,
__global int *center_table,
__global char *switch_membership,
__local float *coord_s_acc,
int num,
int dim,
long x,
int K){
*/
// NOTE(review): this fragment is the OpenMP port of the OpenCL kernel above.
// It is meant to be #included inside an enclosing parallel (teams) region and
// relies on p_h, coord_h, work_mem_h, center_table, switch_membership,
// coord_s_acc, num, dim, x and K being declared in that scope — confirm
// against the including translation unit.
/* block ID and global thread ID */
const int local_id = omp_get_thread_num();
const int thread_id = omp_get_team_num()*omp_get_num_threads()+local_id;
if(thread_id<num){
// coordinate mapping of point[x] to shared mem
// (one thread per team copies; presumably coord_s_acc is team-local storage)
if(local_id == 0)
for(int i=0; i<dim; i++){
coord_s_acc[i] = coord_h[i*num + x];
}
// all threads of the team wait until the copy above is visible
#pragma omp barrier
// cost between this point and point[x]: euclidean distance multiplied by weight
// (squared distance — no sqrt — scaled by the point's weight)
float x_cost = 0.0;
for(int i=0; i<dim; i++)
x_cost += (coord_h[(i*num)+thread_id]-coord_s_acc[i]) * (coord_h[(i*num)+thread_id]-coord_s_acc[i]);
x_cost = x_cost * p_h[thread_id].weight;
float current_cost = p_h[thread_id].cost;
// each point owns a row of K+1 slots in work_mem_h
int base = thread_id*(K+1);
// if computed cost is less then original (it saves), mark it as to reassign
if ( x_cost < current_cost ){
switch_membership[thread_id] = '1';
int addr_1 = base + K;
work_mem_h[addr_1] = x_cost - current_cost;
}
// if computed cost is larger, save the difference
else {
int assign = p_h[thread_id].assign;
int addr_2 = base + center_table[assign];
work_mem_h[addr_2] += current_cost - x_cost;
}
}
|
radix_sort.h | #ifndef _PCL_RADIX_SORT_
#define _PCL_RADIX_SORT_
#include <utility>
#include <limits>
#include "utils.h"
#ifndef BKT_BITS
#define BKT_BITS 12
#endif
// Sorting record: the radix sort orders pairs by .first (the key);
// .second carries the associated payload of the same type.
template<typename T>
using Key_Value_Pair = std::pair<T, T>;
// Parallel LSD (least-significant-digit) radix sort of (key, value) pairs
// using OpenMP. Sorts by the .first member, BKT_BITS bits per pass.
// inp_buf/tmp_buf are ping-pong buffers of elements_count entries each;
// the return value is whichever buffer holds the sorted data after the
// final pass (determined by pass-count parity), so callers must use the
// returned pointer, not inp_buf.
template<typename T>
Key_Value_Pair<T>* radix_sort_parallel(Key_Value_Pair<T>* inp_buf, Key_Value_Pair<T>* tmp_buf, int64_t elements_count, int64_t max_value)
{
constexpr int bkt_bits = BKT_BITS;
constexpr int nbkts = (1 << bkt_bits);
constexpr int bkt_mask = (nbkts - 1);
int maxthreads = omp_get_max_threads();
// NOTE(review): runtime-sized stack arrays (VLA) — a GCC/Clang extension,
// not standard C++; nbkts*maxthreads ints can be large (4096 per thread).
int histogram[nbkts*maxthreads], histogram_ps[nbkts*maxthreads + 1];
if(max_value == 0) return inp_buf;
// Number of significant bits in max_value decides how many passes we need.
int num_bits = 64;
if(sizeof(T) == 8 && max_value > std::numeric_limits<int>::max()) {
num_bits = sizeof(T) * 8 - __builtin_clzll(max_value);
} else {
num_bits = 32 - __builtin_clz((unsigned int)max_value);
}
int num_passes = (num_bits + bkt_bits - 1) / bkt_bits;
#pragma omp parallel
{
int tid = omp_get_thread_num();
int nthreads = omp_get_num_threads();
// Each thread owns one nbkts-wide slice of the shared histogram arrays.
int * local_histogram = &histogram[nbkts*tid];
int * local_histogram_ps = &histogram_ps[nbkts*tid];
// NOTE(review): elements_count is int64_t but this truncates to int —
// wrong for inputs larger than INT_MAX elements; confirm intended range.
int elements_count_4 = elements_count/4*4;
Key_Value_Pair<T> * input = inp_buf;
Key_Value_Pair<T> * output = tmp_buf;
for(unsigned int pass = 0; pass < num_passes; pass++)
{
auto t1 = get_time();
// Step 1: compute histogram
// Reset histogram
for(int i = 0; i < nbkts; i++) local_histogram[i] = 0;
// Statically scheduled so each thread counts the same chunk it will
// later scatter in Step 3 (the per-thread prefix sums line up).
#pragma omp for schedule(static)
for(int64_t i = 0; i < elements_count_4; i+=4)
{
T val_1 = input[i].first;
T val_2 = input[i+1].first;
T val_3 = input[i+2].first;
T val_4 = input[i+3].first;
local_histogram[ (val_1>>(pass*bkt_bits)) & bkt_mask]++;
local_histogram[ (val_2>>(pass*bkt_bits)) & bkt_mask]++;
local_histogram[ (val_3>>(pass*bkt_bits)) & bkt_mask]++;
local_histogram[ (val_4>>(pass*bkt_bits)) & bkt_mask]++;
}
// The last thread also counts the <4 leftover elements.
if(tid == (nthreads -1))
{
for(int64_t i = elements_count_4; i < elements_count; i++)
{
T val = input[i].first;
local_histogram[ (val>>(pass*bkt_bits)) & bkt_mask]++;
}
}
#pragma omp barrier
auto t11 = get_time();
// Step 2: prefix sum
// Serial exclusive prefix sum over (bucket, thread) so every thread
// gets its own contiguous output window per bucket; the bin check
// verifies the counts add up to elements_count.
if(tid == 0)
{
int sum = 0, prev_sum = 0;
for(int bins = 0; bins < nbkts; bins++) for(int t = 0; t < nthreads; t++) { sum += histogram[t*nbkts + bins]; histogram_ps[t*nbkts + bins] = prev_sum; prev_sum = sum; }
histogram_ps[nbkts*nthreads] = prev_sum; if(prev_sum != elements_count) { printf("Error1!\n"); exit(123); }
}
#pragma omp barrier
auto t12 = get_time();
// Step 3: scatter
#pragma omp for schedule(static)
for(int64_t i = 0; i < elements_count_4; i+=4)
{
T val_1 = input[i].first;
T val_2 = input[i+1].first;
T val_3 = input[i+2].first;
T val_4 = input[i+3].first;
T bin_1 = (val_1>>(pass*bkt_bits)) & bkt_mask;
T bin_2 = (val_2>>(pass*bkt_bits)) & bkt_mask;
T bin_3 = (val_3>>(pass*bkt_bits)) & bkt_mask;
T bin_4 = (val_4>>(pass*bkt_bits)) & bkt_mask;
int pos;
pos = local_histogram_ps[bin_1]++;
output[pos] = input[i];
pos = local_histogram_ps[bin_2]++;
output[pos] = input[i+1];
pos = local_histogram_ps[bin_3]++;
output[pos] = input[i+2];
pos = local_histogram_ps[bin_4]++;
output[pos] = input[i+3];
}
if(tid == (nthreads -1))
{
for(int64_t i = elements_count_4; i < elements_count; i++)
{
T val = input[i].first;
int pos = local_histogram_ps[ (val>>(pass*bkt_bits)) & bkt_mask]++;
output[pos] = input[i];
}
}
// Swap ping-pong buffers for the next pass.
Key_Value_Pair<T> * temp = input; input = output; output = temp;
#pragma omp barrier
auto t2 = get_time();
#ifdef DEBUG_TIME
if (tid == 0) printf("pass = %d total time = %.3f step1 = %.3f step2 = %.3f %.3f\n", pass, t2-t1, t11-t1, t12-t11, t2-t12);
#endif
}
}
// Even number of passes ends back in inp_buf, odd ends in tmp_buf.
return (num_passes % 2 == 0 ? inp_buf : tmp_buf);
}
#endif
|
opencl_tc_fmt_plug.c | /*
* TrueCrypt volume OpenCL support to John The Ripper (RIPEMD-160 only)
*
* Based on CPU format originally written by Alain Espinosa <alainesp at
* gmail.com> in 2012.
* Copyright (c) 2015, magnum
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#if HAVE_OPENCL
#define FMT_STRUCT fmt_ocl_tc
#if FMT_EXTERNS_H
extern struct fmt_main FMT_STRUCT;
#elif FMT_REGISTERS_H
john_register_one(&FMT_STRUCT);
#else
#include <string.h>
#include "arch.h"
#include "misc.h"
#include "memory.h"
#include "common.h"
#include "options.h"
#include "formats.h"
#include "crc32.h"
#include "johnswap.h"
#include "aes.h"
#include "pbkdf2_hmac_ripemd160.h"
#include "loader.h"
#include "common-opencl.h"
#define FORMAT_LABEL "truecrypt-opencl"
#define FORMAT_NAME "TrueCrypt AES256_XTS"
#define ALGORITHM_NAME "RIPEMD160 OpenCL"
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
/* 64 is the actual maximum used by Truecrypt software as of version 7.1a */
#define PLAINTEXT_LENGTH 64
#define MAX_CIPHERTEXT_LENGTH (512*2+32)
#define SALT_SIZE sizeof(struct cust_salt)
#define SALT_ALIGN 4
#define BINARY_SIZE 0
#define BINARY_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define TAG_RIPEMD160 "truecrypt_RIPEMD_160$"
#define TAG_RIPEMD160_LEN (sizeof(TAG_RIPEMD160)-1)
#define IS_RIPEMD160 2
#define MAX_PASSSZ 64
#define PASS_BUFSZ 256
#define KPOOL_SZ 64
#define MAX_KFILE_SZ 1048576 /* 1 MB */
#define MAX_KEYFILES 256
static unsigned char (*first_block_dec)[16];
unsigned char (*keyfiles_data)[MAX_KFILE_SZ];
int (*keyfiles_length);
#define KEYLEN PLAINTEXT_LENGTH
#define OUTLEN 64
#define SALTLEN 64
typedef struct {
unsigned int length;
unsigned char v[KEYLEN];
} pbkdf2_password;
typedef struct {
unsigned int v[(OUTLEN+3)/4];
} pbkdf2_hash;
typedef struct {
unsigned char salt[SALTLEN];
} pbkdf2_salt;
struct cust_salt {
unsigned char salt[64];
unsigned char bin[512-64];
int loop_inc;
int num_iterations;
int hash_type;
int nkeyfiles;
} *psalt;
static struct fmt_tests tests_ripemd160[] = {
{"truecrypt_RIPEMD_160$b9f118f89d2699cbe42cad7bc2c61b0822b3d6e57e8d43e79f55666aa30572676c3aced5f0900af223e9fcdf43ac39637640977f546eb714475f8e2dbf5368bfb80a671d7796d4a88c36594acd07081b7ef0fbead3d3a0ff2b295e9488a5a2747ed97905436c28c636f408b36b0898aad3c4e9566182bd55f80e97a55ad9cf20899599fb775f314067c9f7e6153b9544bfbcffb53eef5a34b515e38f186a2ddcc7cd3aed635a1fb4aab98b82d57341ec6ae52ad72e43f41aa251717082d0858bf2ccc69a7ca00daceb5b325841d70bb2216e1f0d4dc936b9f50ebf92dbe2abec9bc3babea7a4357fa74a7b2bcce542044552bbc0135ae35568526e9bd2afde0fa4969d6dc680cf96f7d82ec0a75b6170c94e3f2b6fd98f2e6f01db08ce63f1b6bcf5ea380ed6f927a5a8ced7995d83ea8e9c49238e8523d63d6b669ae0d165b94f1e19b49922b4748798129eed9aa2dae0d2798adabf35dc4cc30b25851a3469a9ee0877775abca26374a4176f8d237f8191fcc870f413ffdbfa73ee22790a548025c4fcafd40f631508f1f6c8d4c847e409c839d21ff146f469feff87198bc184db4b5c5a77f3402f491538503f68e0116dac76344b762627ad678de76cb768779f8f1c35338dd9f72dcc1ac337319b0e21551b9feb85f8cac67a2f35f305a39037bf96cd61869bf1761abcce644598dad254990d17f0faa4965926acb75abf", "password" },
{"truecrypt_RIPEMD_160$6ab053e5ebee8c56bce5705fb1e03bf8cf99e2930232e525befe1e45063aa2e30981585020a967a1c45520543847cdb281557e16c81cea9d329b666e232eeb008dbe3e1f1a181f69f073f0f314bc17e255d42aaa1dbab92231a4fb62d100f6930bae4ccf6726680554dea3e2419fb67230c186f6af2c8b4525eb8ebb73d957b01b8a124b736e45f94160266bcfaeda16b351ec750d980250ebb76672578e9e3a104dde89611bce6ee32179f35073be9f1dee8da002559c6fab292ff3af657cf5a0d864a7844235aeac441afe55f69e51c7a7c06f7330a1c8babae2e6476e3a1d6fb3d4eb63694218e53e0483659aad21f20a70817b86ce56c2b27bae3017727ff26866a00e75f37e6c8091a28582bd202f30a5790f5a90792de010aebc0ed81e9743d00518419f32ce73a8d3f07e55830845fe21c64a8a748cbdca0c3bf512a4938e68a311004538619b65873880f13b2a9486f1292d5c77116509a64eb0a1bba7307f97d42e7cfa36d2b58b71393e04e7e3e328a7728197b8bcdef14cf3f7708cd233c58031c695da5f6b671cc5066323cc86bb3c6311535ad223a44abd4eec9077d70ab0f257de5706a3ff5c15e3bc2bde6496a8414bc6a5ed84fe9462b65efa866312e0699e47338e879ae512a66f3f36fc086d2595bbcff2e744dd1ec283ba8e91299e62e4b2392608dd950ede0c1f3d5b317b2870ead59efe096c054ea1", "123" },
{NULL}
};
static cl_int cl_error;
static pbkdf2_password *inbuffer;
static pbkdf2_hash *outbuffer;
static pbkdf2_salt currentsalt;
static cl_mem mem_in, mem_out, mem_setting;
static struct fmt_main *self;
static size_t insize, outsize, settingsize;
#define STEP 0
#define SEED 256
// This file contains auto-tuning routine(s). Has to be included after formats definitions.
#include "opencl-autotune.h"
#include "memdbg.h"
static const char * warn[] = {
"xfer: ", ", crypt: ", ", xfer: "
};
/* ------- Helper functions ------- */
/* Thin wrapper for the autotune framework: report the largest local work-group
 * size usable with crypt_kernel on the current device. */
static size_t get_task_max_work_group_size()
{
return autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel);
}
/* Allocate all host and device buffers for a given global work size (gws)
 * and bind the device buffers to the kernel arguments. Called by the
 * autotuner (possibly repeatedly, after release_clobj). */
static void create_clobj(size_t gws, struct fmt_main *self)
{
insize = sizeof(pbkdf2_password) * gws;
outsize = sizeof(pbkdf2_hash) * gws;
settingsize = sizeof(pbkdf2_salt);
/* Host-side mirrors of the device buffers. */
inbuffer = mem_calloc(1, insize);
outbuffer = mem_alloc(outsize);
/// Allocate memory
mem_in =
clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL,
&cl_error);
HANDLE_CLERROR(cl_error, "Error allocating mem in");
mem_setting =
clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, settingsize,
NULL, &cl_error);
HANDLE_CLERROR(cl_error, "Error allocating mem setting");
mem_out =
clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize, NULL,
&cl_error);
HANDLE_CLERROR(cl_error, "Error allocating mem out");
/* Kernel signature: (passwords, hashes, salt). */
HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in),
&mem_in), "Error while setting mem_in kernel argument");
HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out),
&mem_out), "Error while setting mem_out kernel argument");
HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting),
&mem_setting), "Error while setting mem_salt kernel argument");
/* CPU-side scratch: decrypted first header block per candidate, plus
 * keyfile staging buffers used by apply_keyfiles(). */
first_block_dec = mem_calloc(gws, sizeof(*first_block_dec));
keyfiles_data = mem_calloc(MAX_KEYFILES, sizeof(*keyfiles_data));
keyfiles_length = mem_calloc(MAX_KEYFILES, sizeof(int));
}
/* Release everything create_clobj() allocated (device buffers first,
 * then host memory). */
static void release_clobj(void)
{
HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in");
HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting");
HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out");
MEM_FREE(inbuffer);
MEM_FREE(outbuffer);
MEM_FREE(first_block_dec);
MEM_FREE(keyfiles_data);
MEM_FREE(keyfiles_length);
}
/* Format teardown hook: only tear down if the autotuner actually ran
 * (i.e. buffers/kernel were created). */
static void done(void)
{
if (autotuned) {
release_clobj();
HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel");
HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");
autotuned--;
}
}
/* Format init hook: remember our fmt_main and prepare the OpenCL device.
 * Kernel build and autotune are deferred to reset(). */
static void init(struct fmt_main *_self)
{
self = _self;
opencl_prepare_dev(gpu_id);
}
/* Build the PBKDF2-RIPEMD160 kernel (once) with buffer sizes baked in as
 * compile-time defines, then run the shared autotuner to pick LWS/GWS. */
static void reset(struct db_main *db)
{
if (!autotuned) {
char build_opts[64];
snprintf(build_opts, sizeof(build_opts),
"-DKEYLEN=%d -DSALTLEN=%d -DOUTLEN=%d",
(int)sizeof(inbuffer->v),
(int)sizeof(currentsalt.salt),
(int)sizeof(outbuffer->v));
opencl_init("$JOHN/kernels/pbkdf2_ripemd160_kernel.cl",
gpu_id, build_opts);
crypt_kernel = clCreateKernel(program[gpu_id], "pbkdf2_ripemd160",
&cl_error);
HANDLE_CLERROR(cl_error, "Error creating kernel");
// Initialize openCL tuning (library) for this format.
opencl_init_auto_setup(SEED, 0, NULL, warn, 1,
self, create_clobj, release_clobj,
sizeof(pbkdf2_password), 0, db);
// Auto tune execution from shared/included code.
autotune_run(self, 1, 0, 1000);
}
}
/* Validate a candidate hash line.
 * Accepted shape: TAG_RIPEMD160 + 1024 hex digits (512-byte volume header),
 * optionally followed by "$<nkeyfiles>$file..." where 1 <= nkeyfiles <=
 * MAX_KEYFILES. Returns 1 if the line is well-formed, 0 otherwise. */
static int valid(char* ciphertext, struct fmt_main *self)
{
	char *hex, *sep;
	unsigned int pos;

	if (strncmp(ciphertext, TAG_RIPEMD160, TAG_RIPEMD160_LEN) != 0)
		return 0;

	hex = ciphertext + TAG_RIPEMD160_LEN;
	sep = strchr(hex, '$');
	if (sep) {
		int nfiles;

		/* hex blob must be exactly 512 bytes => 1024 nibbles */
		if (sep - hex != 512 * 2)
			return 0;
		/* keyfile count follows the separator */
		nfiles = atoi(sep + 1);
		if (nfiles < 1 || nfiles > MAX_KEYFILES)
			return 0;
	} else if (strlen(hex) != 512 * 2) {
		return 0; /* no keyfiles: the whole rest must be the hex blob */
	}

	/* every one of the 1024 characters must be a hex digit */
	for (pos = 0; pos < 512 * 2; pos++)
		if (atoi16l[ARCH_INDEX(hex[pos])] == 0x7F)
			return 0;

	return 1;
}
/* Install the active salt: keep a pointer to the full cust_salt (header,
 * iteration count, keyfile count) and push just the 64-byte salt to the GPU.
 * Fix: "&currentsalt" had been mangled into the mojibake "¤tsalt"
 * (a stray "&curren;" HTML entity), which does not compile. */
static void set_salt(void *salt)
{
	psalt = salt;
	memcpy((char*)currentsalt.salt, psalt->salt, SALTLEN);
	/* Non-blocking write is safe: currentsalt stays live until next set_salt */
	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting,
		CL_FALSE, 0, settingsize, &currentsalt, 0, NULL, NULL),
		"Copy salt to gpu");
}
static void* get_salt(char *ciphertext)
{
static char buf[sizeof(struct cust_salt)+4];
struct cust_salt *s = (struct cust_salt*)mem_align(buf, 4);
unsigned int i;
char tpath[PATH_BUFFER_SIZE] = { 0 };
char *p, *q;
int idx;
FILE *fp;
size_t sz;
memset(s, 0, sizeof(struct cust_salt));
s->loop_inc = 1;
ciphertext += TAG_RIPEMD160_LEN;
s->hash_type = IS_RIPEMD160;
s->num_iterations = 2000;
// Convert the hexadecimal salt in binary
for (i = 0; i < 64; i++)
s->salt[i] = (atoi16[ARCH_INDEX(ciphertext[2*i])] << 4) | atoi16[ARCH_INDEX(ciphertext[2*i+1])];
for (; i < 512; i++)
s->bin[i-64] = (atoi16[ARCH_INDEX(ciphertext[2*i])] << 4) | atoi16[ARCH_INDEX(ciphertext[2*i+1])];
p = ciphertext;
q = strchr(p, '$');
if (!q) /* no keyfiles */
return s;
// process keyfile(s)
p = q + 1;
s->nkeyfiles = atoi(p);
for (idx = 0; idx < s->nkeyfiles; idx++) {
p = strchr(p, '$') + 1; // at first filename
q = strchr(p, '$');
if (!q) { // last file
memset(tpath, 0, sizeof(tpath) - 1);
strncpy(tpath, p, sizeof(tpath));
} else {
memset(tpath, 0, sizeof(tpath) - 1);
strncpy(tpath, p, q-p);
}
/* read this into keyfiles_data[idx] */
fp = fopen(tpath, "rb");
if (!fp)
pexit("fopen %s", p);
if (fseek(fp, 0L, SEEK_END) == -1)
pexit("fseek");
sz = ftell(fp);
if (fseek(fp, 0L, SEEK_SET) == -1)
pexit("fseek");
if (fread(keyfiles_data[idx], 1, sz, fp) != sz)
pexit("fread");
keyfiles_length[idx] = sz;
fclose(fp);
}
return s;
}
/* Decrypt the first `len` bytes (must be a multiple of 16) of a sector with
 * AES-256 in XTS mode, sector number 0. double_key holds two concatenated
 * 256-bit keys: key1 (data, first 32 bytes) and key2 (tweak, next 32 bytes).
 * Per XTS: the tweak starts as the encrypted sector number, and is doubled
 * in GF(2^128) (little-endian, feedback polynomial 0x87) between blocks. */
static void AES_256_XTS_first_sector(const unsigned char *double_key,
unsigned char *out,
const unsigned char *data,
unsigned len) {
unsigned char tweak[16] = { 0 };
unsigned char buf[16];
int i, j, cnt;
AES_KEY key1, key2;
AES_set_decrypt_key(double_key, 256, &key1);
AES_set_encrypt_key(&double_key[32], 256, &key2);
// first aes tweak (we do it right over tweak
AES_encrypt(tweak, tweak, &key2);
cnt = len/16;
for (j=0;;) {
/* XTS per-block: out = D_key1(data XOR tweak) XOR tweak */
for (i = 0; i < 16; ++i) buf[i] = data[i]^tweak[i];
AES_decrypt(buf, out, &key1);
for (i = 0; i < 16; ++i) out[i]^=tweak[i];
++j;
if (j == cnt)
break;
else {
/* Multiply tweak by x in GF(2^128): byte-wise left shift with carry
 * propagation from low to high byte (little-endian convention). */
unsigned char Cin, Cout;
unsigned x;
Cin = 0;
for (x = 0; x < 16; ++x) {
Cout = (tweak[x] >> 7) & 1;
tweak[x] = ((tweak[x] << 1) + Cin) & 0xFF;
Cin = Cout;
}
/* If the top bit fell off, fold in the XTS feedback constant. */
if (Cout)
tweak[0] ^= 135; //GF_128_FDBK;
}
data += 16;
out += 16;
}
}
/* Mix keyfile contents into the passphrase, TrueCrypt style: a rolling
 * CRC-32 over each keyfile feeds a 64-byte pool (bytes added mod 256,
 * index wrapping at KPOOL_SZ), which is then added byte-wise onto the
 * zero-padded 64-byte passphrase buffer. Always returns 0. */
static int apply_keyfiles(unsigned char *pass, size_t pass_memsz, int nkeyfiles)
{
	unsigned char *pool;
	int file_idx;
	size_t pos;

	if (pass_memsz < MAX_PASSSZ)
		error();

	/* zero-pad the passphrase out to its full fixed width */
	{
		size_t plen = strlen((char*)pass);
		memset(pass + plen, 0, MAX_PASSSZ - plen);
	}

	if ((pool = mem_calloc(1, KPOOL_SZ)) == NULL)
		error();

	for (file_idx = 0; file_idx < nkeyfiles; file_idx++) {
		unsigned char *kdata = keyfiles_data[file_idx];
		size_t kdata_sz = keyfiles_length[file_idx];
		uint32_t crc = ~0U;
		int pool_pos = 0;

		for (pos = 0; pos < kdata_sz; pos++) {
			crc = jtr_crc32(crc, kdata[pos]);
			/* spread the 4 CRC bytes into the pool, big-endian order */
			pool[pool_pos++] += (unsigned char)(crc >> 24);
			pool[pool_pos++] += (unsigned char)(crc >> 16);
			pool[pool_pos++] += (unsigned char)(crc >> 8);
			pool[pool_pos++] += (unsigned char)(crc);
			/* Wrap around */
			if (pool_pos == KPOOL_SZ)
				pool_pos = 0;
		}
	}

	/* Apply keyfile pool to passphrase */
	for (pos = 0; pos < KPOOL_SZ; pos++)
		pass[pos] += pool[pos];

	MEM_FREE(pool);
	return 0;
}
/* Main crypt loop: optionally fold keyfiles into each candidate on the CPU,
 * run PBKDF2-RIPEMD160 on the GPU, then CPU-decrypt the first 16 bytes of
 * the volume header for each derived key (checked later by cmp_*). */
static int crypt_all(int *pcount, struct db_salt *salt)
{
int i;
const int count = *pcount;
size_t *lws = local_work_size ? &local_work_size : NULL;
global_work_size = GET_MULTIPLE_OR_BIGGER(count, local_work_size);
/* Keyfiles force every candidate to a fixed 64-byte padded passphrase. */
if (psalt->nkeyfiles) {
#if _OPENMP
#pragma omp parallel for
#endif
for (i = 0; i < count; i++) {
apply_keyfiles(inbuffer[i].v, 64, psalt->nkeyfiles);
inbuffer[i].length = 64;
}
}
/// Copy data to gpu
BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0,
insize, inbuffer, 0, NULL, multi_profilingEvent[0]),
"Copy data to gpu");
/// Run kernel
BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1,
NULL, &global_work_size, lws, 0, NULL,
multi_profilingEvent[1]), "Run kernel");
/// Read the result back (blocking read doubles as a queue flush)
BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0,
outsize, outbuffer, 0, NULL, multi_profilingEvent[2]), "Copy result back");
if (ocl_autotune_running)
return count;
/* Decrypt only the first header block per candidate; cmp_exact() redoes
 * the full header when a match is suspected. */
#if _OPENMP
#pragma omp parallel for
#endif
for (i = 0; i < count; i++) {
AES_256_XTS_first_sector((unsigned char*)outbuffer[i].v, first_block_dec[i], psalt->bin, 16);
}
return count;
}
/* Any candidate whose decrypted header starts with the "TRUE" magic is a
 * potential hit. */
static int cmp_all(void* binary, int count)
{
	int idx;

	for (idx = 0; idx < count; ++idx)
		if (memcmp(first_block_dec[idx], "TRUE", 4) == 0)
			return 1;
	return 0;
}
/* Check a single candidate for the "TRUE" header magic. */
static int cmp_one(void* binary, int index)
{
	return memcmp(first_block_dec[index], "TRUE", 4) == 0;
}
/* Compare a big-endian 4-byte stored CRC against a computed CRC32 value.
 * Returns 1 on match, 0 otherwise. */
static int cmp_crc32s(unsigned char *given_crc32, CRC32_t comp_crc32) {
	int i;

	for (i = 0; i < 4; i++)
		if (given_crc32[i] != (unsigned char)(comp_crc32 >> (24 - 8 * i)))
			return 0;
	return 1;
}
/* Full CPU-side verification of one candidate: rerun PBKDF2 + keyfiles,
 * decrypt the whole 448-byte header, and check the "TRUE" magic plus the
 * two header CRC32s exactly as the TrueCrypt format defines them. */
static int cmp_exact(char *source, int idx)
{
unsigned char key[64];
unsigned char decr_header[512-64];
CRC32_t check_sum;
int ksz = inbuffer[idx].length;
memcpy(key, inbuffer[idx].v, inbuffer[idx].length);
/* process keyfile(s) */
if (psalt->nkeyfiles) {
apply_keyfiles(key, 64, psalt->nkeyfiles);
ksz = 64;
}
/* key is both input (passphrase) and output (derived key) here */
pbkdf2_ripemd160(key, ksz, psalt->salt, 64, psalt->num_iterations, key, sizeof(key), 0);
AES_256_XTS_first_sector(key, decr_header, psalt->bin, 512-64);
if (memcmp(decr_header, "TRUE", 4))
return 0;
/* CRC of the volume-key area (bytes 256..511 of the header) */
CRC32_Init(&check_sum);
CRC32_Update(&check_sum, &decr_header[256-64], 256);
if (!cmp_crc32s(&decr_header[8], ~check_sum))
return 0;
/* CRC of the header fields themselves */
CRC32_Init(&check_sum);
CRC32_Update(&check_sum, decr_header, 256-64-4);
if (!cmp_crc32s(&decr_header[256-64-4], ~check_sum))
return 0;
return 1;
}
#undef set_key
/* Store one plaintext candidate in the host input buffer, truncated to
 * PLAINTEXT_LENGTH. Fix: length was a uint8_t, so strlen() values > 255
 * wrapped around BEFORE the clamp ran (e.g. a 256-byte key stored length 0);
 * use size_t and clamp first. */
static void set_key(char *key, int index)
{
	size_t length = strlen(key);

	if (length > PLAINTEXT_LENGTH)
		length = PLAINTEXT_LENGTH;
	inbuffer[index].length = length;
	memcpy(inbuffer[index].v, key, length);
}
/* Return the stored candidate as a NUL-terminated C string (static buffer,
 * overwritten by the next call). */
static char *get_key(int index)
{
	static char ret[PLAINTEXT_LENGTH + 1];
	uint8_t len = inbuffer[index].length;

	memcpy(ret, inbuffer[index].v, len);
	ret[len] = '\0';
	return ret;
}
/* Bucket a salt for the cracker's salt hash table: simple base-11
 * polynomial over the 64 salt bytes, masked to SALT_HASH_SIZE. */
static int salt_hash(void *salt)
{
	struct cust_salt *s = (struct cust_salt*)salt;
	unsigned hash = 0;
	unsigned i;

	for (i = 0; i < 64; ++i)
		hash = hash * 11 + s->salt[i];
	return hash & (SALT_HASH_SIZE - 1);
}
/* Format registration table: parameters block first, then the method
 * (function-pointer) block wired to the implementations above. */
struct fmt_main FMT_STRUCT = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0, /* min plaintext length */
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{ NULL }, /* tunable cost names */
{ TAG_RIPEMD160 }, /* signature prefixes for --format detection */
tests_ripemd160
}, {
init,
done,
reset,
fmt_default_prepare,
valid,
fmt_default_split,
fmt_default_binary, /* BINARY_SIZE is 0: no stored binary */
get_salt,
{ NULL }, /* tunable cost value extractors */
fmt_default_source,
{
fmt_default_binary_hash
},
salt_hash,
NULL, /* no salt_compare */
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
fmt_default_get_hash
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
|
detector.c | #include "darknet.h"
#include <unistd.h>
static int coco_ids[] = {1,2,3,4,5,6,7,8,9,10,11,13,14,15,16,17,18,19,20,21,22,23,24,25,27,28,31,32,33,34,35,36,37,38,39,40,41,42,43,44,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,67,70,72,73,74,75,76,77,78,79,80,81,82,84,85,86,87,88,89,90};
void train_detector(char *datacfg, char *cfgfile, char *weightfile, int *gpus, int ngpus, int clear)
{
list *options = read_data_cfg(datacfg);
char *train_images = option_find_str(options, "train", "data/train.list");
char *backup_directory = option_find_str(options, "backup", "/backup/");
srand(time(0));
char *base = basecfg(cfgfile);
printf("%s\n", base);
float avg_loss = -1;
network **nets = calloc(ngpus, sizeof(network));
srand(time(0));
int seed = rand();
int i;
for(i = 0; i < ngpus; ++i){
srand(seed);
#ifdef GPU
cuda_set_device(gpus[i]);
#endif
nets[i] = load_network(cfgfile, weightfile, clear);
nets[i]->learning_rate *= ngpus;
}
srand(time(0));
network *net = nets[0];
int imgs = net->batch * net->subdivisions * ngpus;
printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
data train, buffer;
layer l = net->layers[net->n - 1];
int classes = l.classes;
float jitter = l.jitter;
list *plist = get_paths(train_images);
//int N = plist->size;
char **paths = (char **)list_to_array(plist);
load_args args = get_base_args(net);
args.coords = l.coords;
args.paths = paths;
args.n = imgs;
args.m = plist->size;
args.classes = classes;
args.jitter = jitter;
args.num_boxes = l.max_boxes;
args.d = &buffer;
args.type = DETECTION_DATA;
//args.type = INSTANCE_DATA;
args.threads = 64;
pthread_t load_thread = load_data(args);
double time;
int count = 0;
//while(i*imgs < N*120){
while(get_current_batch(net) < net->max_batches){
if(l.random && count++%10 == 0){
printf("Resizing\n");
int dim = (rand() % 10 + 10) * 32;
if (get_current_batch(net)+200 > net->max_batches) dim = 608;
//int dim = (rand() % 4 + 16) * 32;
printf("%d\n", dim);
args.w = dim;
args.h = dim;
pthread_join(load_thread, 0);
train = buffer;
free_data(train);
load_thread = load_data(args);
#pragma omp parallel for
for(i = 0; i < ngpus; ++i){
resize_network(nets[i], dim, dim);
}
net = nets[0];
}
time=what_time_is_it_now();
pthread_join(load_thread, 0);
train = buffer;
load_thread = load_data(args);
/*
int k;
for(k = 0; k < l.max_boxes; ++k){
box b = float_to_box(train.y.vals[10] + 1 + k*5);
if(!b.x) break;
printf("loaded: %f %f %f %f\n", b.x, b.y, b.w, b.h);
}
*/
/*
int zz;
for(zz = 0; zz < train.X.cols; ++zz){
image im = float_to_image(net->w, net->h, 3, train.X.vals[zz]);
int k;
for(k = 0; k < l.max_boxes; ++k){
box b = float_to_box(train.y.vals[zz] + k*5, 1);
printf("%f %f %f %f\n", b.x, b.y, b.w, b.h);
draw_bbox(im, b, 1, 1,0,0);
}
show_image(im, "truth11");
cvWaitKey(0);
save_image(im, "truth11");
}
*/
printf("Loaded: %lf seconds\n", what_time_is_it_now()-time);
time=what_time_is_it_now();
float loss = 0;
#ifdef GPU
if(ngpus == 1){
loss = train_network(net, train);
} else {
loss = train_networks(nets, ngpus, train, 4);
}
#else
loss = train_network(net, train);
#endif
if (avg_loss < 0) avg_loss = loss;
avg_loss = avg_loss*.9 + loss*.1;
i = get_current_batch(net);
printf("%ld: %f, %f avg, %f rate, %lf seconds, %d images\n", get_current_batch(net), loss, avg_loss, get_current_rate(net), what_time_is_it_now()-time, i*imgs);
if(i%100==0){
#ifdef GPU
if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
char buff[256];
sprintf(buff, "%s/%s.backup", backup_directory, base);
save_weights(net, buff);
}
if(i%10000==0 || (i < 1000 && i%100 == 0)){
#ifdef GPU
if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
char buff[256];
sprintf(buff, "%s/%s_%d.weights", backup_directory, base, i);
save_weights(net, buff);
}
free_data(train);
}
#ifdef GPU
if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
char buff[256];
sprintf(buff, "%s/%s_final.weights", backup_directory, base);
save_weights(net, buff);
}
/* Extract the numeric COCO image id from a path like
 * ".../COCO_val2014_000000000139.jpg": prefer the digits after the last '_',
 * else the digits after the last '/', else parse the whole name.
 * Fix: the original did atoi(p+1) with p == NULL when the name contained
 * neither separator, dereferencing address 1. */
static int get_coco_image_id(char *filename)
{
    char *slash = strrchr(filename, '/');
    char *uscore = strrchr(filename, '_');

    if (uscore) return atoi(uscore + 1);
    if (slash) return atoi(slash + 1);
    return atoi(filename);
}
/* Write detections for one image as COCO-format JSON result objects
 * (one line per class with nonzero probability). Boxes are converted from
 * center/size to [x, y, w, h] and clipped to the image. */
static void print_cocos(FILE *fp, char *image_path, detection *dets, int num_boxes, int classes, int w, int h)
{
    int d, c;
    int image_id = get_coco_image_id(image_path);

    for(d = 0; d < num_boxes; ++d){
        box bb = dets[d].bbox;
        float xmin = bb.x - bb.w/2.;
        float xmax = bb.x + bb.w/2.;
        float ymin = bb.y - bb.h/2.;
        float ymax = bb.y + bb.h/2.;

        /* clip to image bounds */
        if (xmin < 0) xmin = 0;
        if (ymin < 0) ymin = 0;
        if (xmax > w) xmax = w;
        if (ymax > h) ymax = h;

        float bx = xmin;
        float by = ymin;
        float bw = xmax - xmin;
        float bh = ymax - ymin;

        for(c = 0; c < classes; ++c){
            if (dets[d].prob[c]) fprintf(fp, "{\"image_id\":%d, \"category_id\":%d, \"bbox\":[%f, %f, %f, %f], \"score\":%f},\n", image_id, coco_ids[c], bx, by, bw, bh, dets[d].prob[c]);
        }
    }
}
/* Write VOC-style detection lines, one output file per class (fps[j]).
 * VOC boxes are 1-based pixel coordinates, hence the +1 and clamp to 1. */
void print_detector_detections(FILE **fps, char *id, detection *dets, int total, int classes, int w, int h)
{
    int d, c;

    for(d = 0; d < total; ++d){
        float xmin = dets[d].bbox.x - dets[d].bbox.w/2. + 1;
        float xmax = dets[d].bbox.x + dets[d].bbox.w/2. + 1;
        float ymin = dets[d].bbox.y - dets[d].bbox.h/2. + 1;
        float ymax = dets[d].bbox.y + dets[d].bbox.h/2. + 1;

        if (xmin < 1) xmin = 1;
        if (ymin < 1) ymin = 1;
        if (xmax > w) xmax = w;
        if (ymax > h) ymax = h;

        for(c = 0; c < classes; ++c){
            if (dets[d].prob[c]) fprintf(fps[c], "%s %f %f %f %f %f\n", id, dets[d].prob[c],
                    xmin, ymin, xmax, ymax);
        }
    }
}
/* Write ImageNet-challenge detection lines: image id, 1-based class id,
 * score, and the clipped box corners. */
void print_imagenet_detections(FILE *fp, int id, detection *dets, int total, int classes, int w, int h)
{
    int d, c;

    for(d = 0; d < total; ++d){
        float xmin = dets[d].bbox.x - dets[d].bbox.w/2.;
        float xmax = dets[d].bbox.x + dets[d].bbox.w/2.;
        float ymin = dets[d].bbox.y - dets[d].bbox.h/2.;
        float ymax = dets[d].bbox.y + dets[d].bbox.h/2.;

        if (xmin < 0) xmin = 0;
        if (ymin < 0) ymin = 0;
        if (xmax > w) xmax = w;
        if (ymax > h) ymax = h;

        for(c = 0; c < classes; ++c){
            if (dets[d].prob[c]) fprintf(fp, "%d %d %f %f %f %f %f\n", id, c+1, dets[d].prob[c],
                    xmin, ymin, xmax, ymax);
        }
    }
}
// Run validation with horizontal-flip augmentation: batch size 2 holds the
// image and its mirror, predictions are averaged by the network. Results are
// written in coco/imagenet/voc format depending on the "eval" option.
// Images are loaded by nthreads background threads, pipelined one round ahead.
void validate_detector_flip(char *datacfg, char *cfgfile, char *weightfile, char *outfile)
{
int j;
list *options = read_data_cfg(datacfg);
char *valid_images = option_find_str(options, "valid", "data/train.list");
char *name_list = option_find_str(options, "names", "data/names.list");
char *prefix = option_find_str(options, "results", "results");
char **names = get_labels(name_list);
char *mapf = option_find_str(options, "map", 0);
int *map = 0;
if (mapf) map = read_map(mapf);
network *net = load_network(cfgfile, weightfile, 0);
// batch of 2: original + flipped copy of the same image
set_batch_network(net, 2);
fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
srand(time(0));
list *plist = get_paths(valid_images);
char **paths = (char **)list_to_array(plist);
layer l = net->layers[net->n-1];
int classes = l.classes;
char buff[1024];
char *type = option_find_str(options, "eval", "voc");
FILE *fp = 0;
FILE **fps = 0;
int coco = 0;
int imagenet = 0;
// Pick the output style: one JSON file (coco), one txt file (imagenet),
// or one txt file per class (voc default).
if(0==strcmp(type, "coco")){
if(!outfile) outfile = "coco_results";
snprintf(buff, 1024, "%s/%s.json", prefix, outfile);
fp = fopen(buff, "w");
fprintf(fp, "[\n");
coco = 1;
} else if(0==strcmp(type, "imagenet")){
if(!outfile) outfile = "imagenet-detection";
snprintf(buff, 1024, "%s/%s.txt", prefix, outfile);
fp = fopen(buff, "w");
imagenet = 1;
classes = 200;
// NOTE(review): in this path fp is never fclose()d below — possible
// lost buffered output; confirm against upstream.
} else {
if(!outfile) outfile = "comp4_det_test_";
fps = calloc(classes, sizeof(FILE *));
for(j = 0; j < classes; ++j){
snprintf(buff, 1024, "%s/%s%s.txt", prefix, outfile, names[j]);
fps[j] = fopen(buff, "w");
}
}
int m = plist->size;
int i=0;
int t;
float thresh = .005;
float nms = .45;
int nthreads = 4;
image *val = calloc(nthreads, sizeof(image));
image *val_resized = calloc(nthreads, sizeof(image));
image *buf = calloc(nthreads, sizeof(image));
image *buf_resized = calloc(nthreads, sizeof(image));
pthread_t *thr = calloc(nthreads, sizeof(pthread_t));
// double-channel input: image in the first half, its mirror in the second
image input = make_image(net->w, net->h, net->c*2);
load_args args = {0};
args.w = net->w;
args.h = net->h;
//args.type = IMAGE_DATA;
args.type = LETTERBOX_DATA;
// prime the pipeline with the first nthreads loads
for(t = 0; t < nthreads; ++t){
args.path = paths[i+t];
args.im = &buf[t];
args.resized = &buf_resized[t];
thr[t] = load_data_in_thread(args);
}
double start = what_time_is_it_now();
for(i = nthreads; i < m+nthreads; i += nthreads){
fprintf(stderr, "%d\n", i);
// collect the previous round of loads
for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
pthread_join(thr[t], 0);
val[t] = buf[t];
val_resized[t] = buf_resized[t];
}
// kick off the next round
for(t = 0; t < nthreads && i+t < m; ++t){
args.path = paths[i+t];
args.im = &buf[t];
args.resized = &buf_resized[t];
thr[t] = load_data_in_thread(args);
}
// predict on the collected images
for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
char *path = paths[i+t-nthreads];
char *id = basecfg(path);
copy_cpu(net->w*net->h*net->c, val_resized[t].data, 1, input.data, 1);
flip_image(val_resized[t]);
copy_cpu(net->w*net->h*net->c, val_resized[t].data, 1, input.data + net->w*net->h*net->c, 1);
network_predict(net, input.data);
int w = val[t].w;
int h = val[t].h;
int num = 0;
detection *dets = get_network_boxes(net, w, h, thresh, .5, map, 0, &num);
if (nms) do_nms_sort(dets, num, classes, nms);
if (coco){
print_cocos(fp, path, dets, num, classes, w, h);
} else if (imagenet){
print_imagenet_detections(fp, i+t-nthreads+1, dets, num, classes, w, h);
} else {
print_detector_detections(fps, id, dets, num, classes, w, h);
}
free_detections(dets, num);
free(id);
free_image(val[t]);
free_image(val_resized[t]);
}
}
for(j = 0; j < classes; ++j){
if(fps) fclose(fps[j]);
}
if(coco){
// overwrite the trailing ",\n" so the JSON array is well-formed
fseek(fp, -2, SEEK_CUR);
fprintf(fp, "\n]\n");
fclose(fp);
}
fprintf(stderr, "Total Detection Time: %f Seconds\n", what_time_is_it_now() - start);
}
// Standard (no-flip) validation pass: run the detector over the "valid" list
// and emit results in coco/imagenet/voc format, with nthreads background
// image loaders pipelined one round ahead of prediction.
void validate_detector(char *datacfg, char *cfgfile, char *weightfile, char *outfile)
{
int j;
list *options = read_data_cfg(datacfg);
char *valid_images = option_find_str(options, "valid", "data/train.list");
char *name_list = option_find_str(options, "names", "data/names.list");
char *prefix = option_find_str(options, "results", "results");
char **names = get_labels(name_list);
char *mapf = option_find_str(options, "map", 0);
int *map = 0;
if (mapf) map = read_map(mapf);
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
srand(time(0));
list *plist = get_paths(valid_images);
char **paths = (char **)list_to_array(plist);
layer l = net->layers[net->n-1];
int classes = l.classes;
char buff[1024];
char *type = option_find_str(options, "eval", "voc");
FILE *fp = 0;
FILE **fps = 0;
int coco = 0;
int imagenet = 0;
// Output style: single JSON file (coco), single txt (imagenet),
// or one txt per class (voc default).
if(0==strcmp(type, "coco")){
if(!outfile) outfile = "coco_results";
snprintf(buff, 1024, "%s/%s.json", prefix, outfile);
fp = fopen(buff, "w");
fprintf(fp, "[\n");
coco = 1;
} else if(0==strcmp(type, "imagenet")){
if(!outfile) outfile = "imagenet-detection";
snprintf(buff, 1024, "%s/%s.txt", prefix, outfile);
fp = fopen(buff, "w");
imagenet = 1;
classes = 200;
// NOTE(review): fp is not fclose()d for this path below — confirm.
} else {
if(!outfile) outfile = "comp4_det_test_";
fps = calloc(classes, sizeof(FILE *));
for(j = 0; j < classes; ++j){
snprintf(buff, 1024, "%s/%s%s.txt", prefix, outfile, names[j]);
fps[j] = fopen(buff, "w");
}
}
int m = plist->size;
int i=0;
int t;
float thresh = .005;
float nms = .45;
int nthreads = 4;
image *val = calloc(nthreads, sizeof(image));
image *val_resized = calloc(nthreads, sizeof(image));
image *buf = calloc(nthreads, sizeof(image));
image *buf_resized = calloc(nthreads, sizeof(image));
pthread_t *thr = calloc(nthreads, sizeof(pthread_t));
load_args args = {0};
args.w = net->w;
args.h = net->h;
//args.type = IMAGE_DATA;
args.type = LETTERBOX_DATA;
// prime the loader pipeline
for(t = 0; t < nthreads; ++t){
args.path = paths[i+t];
args.im = &buf[t];
args.resized = &buf_resized[t];
thr[t] = load_data_in_thread(args);
}
double start = what_time_is_it_now();
for(i = nthreads; i < m+nthreads; i += nthreads){
fprintf(stderr, "%d\n", i);
// collect the previous round of loads
for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
pthread_join(thr[t], 0);
val[t] = buf[t];
val_resized[t] = buf_resized[t];
}
// start the next round
for(t = 0; t < nthreads && i+t < m; ++t){
args.path = paths[i+t];
args.im = &buf[t];
args.resized = &buf_resized[t];
thr[t] = load_data_in_thread(args);
}
// predict and print results for the collected images
for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
char *path = paths[i+t-nthreads];
char *id = basecfg(path);
float *X = val_resized[t].data;
network_predict(net, X);
int w = val[t].w;
int h = val[t].h;
int nboxes = 0;
detection *dets = get_network_boxes(net, w, h, thresh, .5, map, 0, &nboxes);
if (nms) do_nms_sort(dets, nboxes, classes, nms);
if (coco){
print_cocos(fp, path, dets, nboxes, classes, w, h);
} else if (imagenet){
print_imagenet_detections(fp, i+t-nthreads+1, dets, nboxes, classes, w, h);
} else {
print_detector_detections(fps, id, dets, nboxes, classes, w, h);
}
free_detections(dets, nboxes);
free(id);
free_image(val[t]);
free_image(val_resized[t]);
}
}
for(j = 0; j < classes; ++j){
if(fps) fclose(fps[j]);
}
if(coco){
// overwrite the trailing ",\n" so the JSON array is well-formed
fseek(fp, -2, SEEK_CUR);
fprintf(fp, "\n]\n");
fclose(fp);
}
fprintf(stderr, "Total Detection Time: %f Seconds\n", what_time_is_it_now() - start);
}
// Measure proposal recall and average IOU over data/coco_val_5k.list,
// printing running statistics per image.
// Fixes: (1) the best-IOU scan iterated k < l.w*l.h*l.n while dets only
// holds nboxes entries returned by get_network_boxes() — an out-of-bounds
// read; iterate over nboxes instead. (2) dets and truth were leaked on
// every image; free them each iteration.
void validate_detector_recall(char *cfgfile, char *weightfile)
{
    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 1);
    fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
    srand(time(0));

    list *plist = get_paths("data/coco_val_5k.list");
    char **paths = (char **)list_to_array(plist);

    int j, k;

    int m = plist->size;
    int i=0;

    float thresh = .001;
    float iou_thresh = .5;
    float nms = .4;

    int total = 0;      // ground-truth boxes seen
    int correct = 0;    // truths matched above iou_thresh
    int proposals = 0;  // detections above objectness threshold
    float avg_iou = 0;

    for(i = 0; i < m; ++i){
        char *path = paths[i];
        image orig = load_image_color(path, 0, 0);
        image sized = resize_image(orig, net->w, net->h);
        char *id = basecfg(path);
        network_predict(net, sized.data);
        int nboxes = 0;
        detection *dets = get_network_boxes(net, sized.w, sized.h, thresh, .5, 0, 1, &nboxes);
        if (nms) do_nms_obj(dets, nboxes, 1, nms);

        // derive the label path from the image path
        char labelpath[4096];
        find_replace(path, "images", "labels", labelpath);
        find_replace(labelpath, "JPEGImages", "labels", labelpath);
        find_replace(labelpath, ".jpg", ".txt", labelpath);
        find_replace(labelpath, ".JPEG", ".txt", labelpath);

        int num_labels = 0;
        box_label *truth = read_boxes(labelpath, &num_labels);
        for(k = 0; k < nboxes; ++k){
            if(dets[k].objectness > thresh){
                ++proposals;
            }
        }
        for (j = 0; j < num_labels; ++j) {
            ++total;
            box t = {truth[j].x, truth[j].y, truth[j].w, truth[j].h};
            float best_iou = 0;
            // scan only the nboxes detections actually returned (was l.w*l.h*l.n)
            for(k = 0; k < nboxes; ++k){
                float iou = box_iou(dets[k].bbox, t);
                if(dets[k].objectness > thresh && iou > best_iou){
                    best_iou = iou;
                }
            }
            avg_iou += best_iou;
            if(best_iou > iou_thresh){
                ++correct;
            }
        }

        fprintf(stderr, "%5d %5d %5d\tRPs/Img: %.2f\tIOU: %.2f%%\tRecall:%.2f%%\n", i, correct, total, (float)proposals/(i+1), avg_iou*100/total, 100.*correct/total);
        free(truth);
        free_detections(dets, nboxes);
        free(id);
        free_image(orig);
        free_image(sized);
    }
}
void test_detector_folder( char *datacfg,      /* e.g. cfg/coco.data */
                           char *cfgfile,      /* network configuration file */
                           char *weightfile,   /* network weights file */
                           char *input_folder, /* directory with input frames */
                           char *output_folder,/* directory for annotated output */
                           float thresh,       /* detection score threshold */
                           float hier_thresh)
{
    /* Run the detector over a folder of sequentially numbered frames
     * (NNNN_rgb_raw.jpg), saving an annotated image per frame and,
     * when save_result_txt is set, a per-frame text file with the raw
     * detections. Stops at the first missing frame index. */
    if( !input_folder ){
        printf("Please Provide Input Image Folder");
        return;
    }
    if( !output_folder ){
        printf("Please Provide Output Image Folder");
        return;
    }

    /* Load the data cfg (e.g. coco.data) and the class-name list. */
    list *options = read_data_cfg(datacfg);
    char *name_list = option_find_str(options, "names", "data/names.list");
    char **names = get_labels(name_list);

    /* Label glyphs used when drawing detections. */
    image **alphabet = load_alphabet();

    /* Load network configuration and weights. */
    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 1);
    srand(2222222);
    double time;

    char buff[256], buff2[256];
    char *input_img_name = buff;
    char *output_file = buff2;
    float nms = .45;

    FILE *fp = NULL;         /* BUG FIX: initialize so we never fclose garbage */
    int save_result_txt = 1; /* 1: also dump detections to a .txt per frame */
    int img_counter = -1;

    while(1)
    {
        img_counter++;

        /* BUG FIX: compose paths with snprintf instead of strncpy+strcat,
         * which could leave the buffer unterminated or overflow it for
         * long folder names. Adjust the pattern here if frames are named
         * differently (e.g. "frame%04d.jpg" or "%04d.png"). */
        snprintf(input_img_name, sizeof(buff), "%s/%04d_rgb_raw.jpg", input_folder, img_counter);
        if( access( input_img_name, F_OK ) == -1 ) {
            printf("Cannot find image %s \n",input_img_name);
            break;
        }

        /* Optional per-frame detection dump: <output_folder>_txts/NNNN_yolo2_T.txt */
        if (save_result_txt==1)
        {
            snprintf(output_file, sizeof(buff2), "%s_txts/%04d_yolo2_%.2f.txt", output_folder, img_counter, thresh);
            fp = fopen(output_file,"w+");
            if (fp == NULL)
            {
                printf("Cannot save to file %s \n",output_file);
                break;
            }
        }

        /* Base name (no extension) of the annotated output image. */
        snprintf(output_file, sizeof(buff2), "%s/%04d_yolo2_%.2f", output_folder, img_counter, thresh);

        image im = load_image_color(input_img_name,0,0);
        image sized = letterbox_image(im, net->w, net->h);
        layer l = net->layers[net->n-1];

        time = what_time_is_it_now();
        network_predict(net, sized.data);
        if (img_counter%10==0)
            printf("%s: Predicted in %f seconds.\n", input_img_name, what_time_is_it_now()-time);

        int nboxes = 0;
        detection *dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 1, &nboxes);
        if (nms) do_nms_sort(dets, nboxes, l.classes, nms);

        if (save_result_txt==0)
            draw_detections(im, dets, nboxes, thresh, names, alphabet, l.classes); // if want to show classes, prob in terminal. See inside function.
        else
            draw_save_detections(im, dets, nboxes, thresh, names, alphabet, l.classes,fp);

        free_detections(dets, nboxes);
        save_image(im, output_file);
        free_image(im);
        free_image(sized);
        if (save_result_txt==1 && fp){
            fclose(fp);
            fp = NULL;
        }
    }
}
void test_detector(char *datacfg, char *cfgfile, char *weightfile, char *filename, float thresh, float hier_thresh, char *outfile, int fullscreen)
{
    /* Single-image detection: runs the detector either on `filename`
     * (once) or on image paths read interactively from stdin (until
     * EOF), drawing the detections and saving the annotated image. */
    list *options = read_data_cfg(datacfg);
    char *name_list = option_find_str(options, "names", "data/names.list");
    char **names = get_labels(name_list);
    image **alphabet = load_alphabet();

    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 1);
    srand(2222222);

    char path_buf[256];
    char *input = path_buf;
    float nms_thresh = .45;

    for(;;){
        /* Either take the fixed filename or prompt for one. */
        if(filename){
            strncpy(input, filename, 256);
        } else {
            printf("Enter Image Path: ");
            fflush(stdout);
            input = fgets(input, 256, stdin);
            if(!input) return;
            strtok(input, "\n");
        }

        image im = load_image_color(input, 0, 0);
        image sized = letterbox_image(im, net->w, net->h);
        layer last = net->layers[net->n-1];

        double started = what_time_is_it_now();
        network_predict(net, sized.data);
        printf("%s: Predicted in %f seconds.\n", input, what_time_is_it_now()-started);

        int nboxes = 0;
        detection *dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 1, &nboxes);
        if (nms_thresh) do_nms_sort(dets, nboxes, last.classes, nms_thresh);
        draw_detections(im, dets, nboxes, thresh, names, alphabet, last.classes);
        free_detections(dets, nboxes);

        if(outfile){
            save_image(im, outfile);
        } else {
            save_image(im, "predictions");
#ifdef OPENCV
            cvNamedWindow("predictions", CV_WINDOW_NORMAL);
            if(fullscreen){
                cvSetWindowProperty("predictions", CV_WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN);
            }
            show_image(im, "predictions");
            cvWaitKey(0);
            cvDestroyAllWindows();
#endif
        }

        free_image(im);
        free_image(sized);
        if (filename) break;
    }
}
/*
void censor_detector(char *datacfg, char *cfgfile, char *weightfile, int cam_index, const char *filename, int class, float thresh, int skip)
{
#ifdef OPENCV
char *base = basecfg(cfgfile);
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
CvCapture * cap;
int w = 1280;
int h = 720;
if(filename){
cap = cvCaptureFromFile(filename);
}else{
cap = cvCaptureFromCAM(cam_index);
}
if(w){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w);
}
if(h){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h);
}
if(!cap) error("Couldn't connect to webcam.\n");
cvNamedWindow(base, CV_WINDOW_NORMAL);
cvResizeWindow(base, 512, 512);
float fps = 0;
int i;
float nms = .45;
while(1){
image in = get_image_from_stream(cap);
//image in_s = resize_image(in, net->w, net->h);
image in_s = letterbox_image(in, net->w, net->h);
layer l = net->layers[net->n-1];
float *X = in_s.data;
network_predict(net, X);
int nboxes = 0;
detection *dets = get_network_boxes(net, in.w, in.h, thresh, 0, 0, 0, &nboxes);
//if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
for(i = 0; i < nboxes; ++i){
if(dets[i].prob[class] > thresh){
box b = dets[i].bbox;
int left = b.x-b.w/2.;
int top = b.y-b.h/2.;
censor_image(in, left, top, b.w, b.h);
}
}
show_image(in, base);
cvWaitKey(10);
free_detections(dets, nboxes);
free_image(in_s);
free_image(in);
float curr = 0;
fps = .9*fps + .1*curr;
for(i = 0; i < skip; ++i){
image in = get_image_from_stream(cap);
free_image(in);
}
}
#endif
}
void extract_detector(char *datacfg, char *cfgfile, char *weightfile, int cam_index, const char *filename, int class, float thresh, int skip)
{
#ifdef OPENCV
char *base = basecfg(cfgfile);
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
CvCapture * cap;
int w = 1280;
int h = 720;
if(filename){
cap = cvCaptureFromFile(filename);
}else{
cap = cvCaptureFromCAM(cam_index);
}
if(w){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w);
}
if(h){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h);
}
if(!cap) error("Couldn't connect to webcam.\n");
cvNamedWindow(base, CV_WINDOW_NORMAL);
cvResizeWindow(base, 512, 512);
float fps = 0;
int i;
int count = 0;
float nms = .45;
while(1){
image in = get_image_from_stream(cap);
//image in_s = resize_image(in, net->w, net->h);
image in_s = letterbox_image(in, net->w, net->h);
layer l = net->layers[net->n-1];
show_image(in, base);
int nboxes = 0;
float *X = in_s.data;
network_predict(net, X);
detection *dets = get_network_boxes(net, in.w, in.h, thresh, 0, 0, 1, &nboxes);
//if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
for(i = 0; i < nboxes; ++i){
if(dets[i].prob[class] > thresh){
box b = dets[i].bbox;
int size = b.w*in.w > b.h*in.h ? b.w*in.w : b.h*in.h;
int dx = b.x*in.w-size/2.;
int dy = b.y*in.h-size/2.;
image bim = crop_image(in, dx, dy, size, size);
char buff[2048];
sprintf(buff, "results/extract/%07d", count);
++count;
save_image(bim, buff);
free_image(bim);
}
}
free_detections(dets, nboxes);
free_image(in_s);
free_image(in);
float curr = 0;
fps = .9*fps + .1*curr;
for(i = 0; i < skip; ++i){
image in = get_image_from_stream(cap);
free_image(in);
}
}
#endif
}
*/
/*
void network_detect(network *net, image im, float thresh, float hier_thresh, float nms, detection *dets)
{
network_predict_image(net, im);
layer l = net->layers[net->n-1];
int nboxes = num_boxes(net);
fill_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 0, dets);
if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
}
*/
void run_detector(int argc, char **argv)
{
    /* Command-line entry point for the detector subcommands:
     * test / train / valid / valid2 / recall / demo. argv[2] selects the
     * mode; argv[3..] supply data cfg, net cfg, weights and an optional
     * input file. */
    char *prefix = find_char_arg(argc, argv, "-prefix", 0);
    float thresh = find_float_arg(argc, argv, "-thresh", .5);
    float hier_thresh = find_float_arg(argc, argv, "-hier", .5);
    int cam_index = find_int_arg(argc, argv, "-c", 0);
    int frame_skip = find_int_arg(argc, argv, "-s", 0);
    int avg = find_int_arg(argc, argv, "-avg", 3);
    if(argc < 4){
        fprintf(stderr, "usage: %s %s [train/test/valid] [cfg] [weights (optional)]\n", argv[0], argv[1]);
        return;
    }
    char *gpu_list = find_char_arg(argc, argv, "-gpus", 0);
    char *outfile = find_char_arg(argc, argv, "-out", 0);

    /* Parse "-gpus 0,1,2" into an int array; default to the single
     * global gpu_index when no list is given. */
    int *gpus = 0;
    int gpu = 0;
    int ngpus = 0;
    if(gpu_list){
        printf("%s\n", gpu_list);
        int len = strlen(gpu_list);
        ngpus = 1;
        int i;
        for(i = 0; i < len; ++i){
            if (gpu_list[i] == ',') ++ngpus;
        }
        gpus = calloc(ngpus, sizeof(int));
        char *cursor = gpu_list;
        for(i = 0; i < ngpus; ++i){
            gpus[i] = atoi(cursor);
            cursor = strchr(cursor, ',')+1;
        }
    } else {
        gpu = gpu_index;
        gpus = &gpu;
        ngpus = 1;
    }

    int clear = find_arg(argc, argv, "-clear");
    int fullscreen = find_arg(argc, argv, "-fullscreen");
    int width = find_int_arg(argc, argv, "-w", 0);
    int height = find_int_arg(argc, argv, "-h", 0);
    int fps = find_int_arg(argc, argv, "-fps", 0);
    //int class = find_int_arg(argc, argv, "-class", 0);

    char *datacfg = argv[3];
    char *cfg = argv[4];
    char *weights = (argc > 5) ? argv[5] : 0;
    char *filename = (argc > 6) ? argv[6]: 0;
    if(0==strcmp(argv[2], "test")) test_detector(datacfg, cfg, weights, filename, thresh, hier_thresh, outfile, fullscreen);
    else if(0==strcmp(argv[2], "train")) train_detector(datacfg, cfg, weights, gpus, ngpus, clear);
    else if(0==strcmp(argv[2], "valid")) validate_detector(datacfg, cfg, weights, outfile);
    else if(0==strcmp(argv[2], "valid2")) validate_detector_flip(datacfg, cfg, weights, outfile);
    else if(0==strcmp(argv[2], "recall")) validate_detector_recall(cfg, weights);
    else if(0==strcmp(argv[2], "demo")) {
        list *options = read_data_cfg(datacfg);
        int classes = option_find_int(options, "classes", 20);
        char *name_list = option_find_str(options, "names", "data/names.list");
        char **names = get_labels(name_list);
        demo(cfg, weights, thresh, cam_index, filename, names, classes, frame_skip, prefix, avg, hier_thresh, width, height, fps, fullscreen);
    }
    //else if(0==strcmp(argv[2], "extract")) extract_detector(datacfg, cfg, weights, cam_index, filename, class, thresh, frame_skip);
    //else if(0==strcmp(argv[2], "censor")) censor_detector(datacfg, cfg, weights, cam_index, filename, class, thresh, frame_skip);

    /* BUG FIX: free the GPU list when it was heap-allocated above; the
     * stack fallback (&gpu) must not be freed. */
    if (gpus != &gpu) free(gpus);
}
|
CommonParallel.h | #ifndef COMMON_PARALLEL_H_
#define COMMON_PARALLEL_H_
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <stdarg.h>
#include <string.h>
#include <sys/time.h> /* gettimeofday(), used by __get_seconds() */
#include <time.h>
#ifdef __UPC__
#include <upc.h>
#elif defined _OPENMP
#include <omp.h>
#elif defined USE_MPI
#include <mpi.h>
#else
/* BUG FIX: "#warn" is not a preprocessing directive; the GNU/clang
 * spelling is "#warning". */
#warning "No parallelism has been chosen at compile time... did you want OpenMP (cc -fopenmp), MPI (mpicc -DUSE_MPI) or UPC (upcc)?"
#endif
#if defined (__cplusplus)
extern "C" {
#endif
/* First defined the EXIT_FUNC that will be used */
#ifdef __UPC__
/* UPC */
#ifdef _OPENMP
#error "You can not currently specify OpenMP AND UPC"
#endif
#define EXIT_FUNC(code) upc_global_exit(code)
#define INIT(argc, argv) { /* noop */ }
#define FINALIZE() { /* noop */ }
#else
#ifdef MPI_VERSION
/* MPI */
#ifdef _OPENMP
#error "You can not currently specify OpenMP AND MPI"
#endif
#define EXIT_FUNC(code) do { MPI_Abort(MPI_COMM_WORLD, code); exit(code); } while (0)
#define INIT(argc, argv) MPI_Init(&argc, &argv)
#define FINALIZE() MPI_Finalize()
#else
/* OpenMP */
#define EXIT_FUNC(x) exit(x)
#ifdef _OPENMP
#define INIT(argc, argv) _Pragma("omp parallel") {
#define FINALIZE() }
#else
#define INIT(argc, argv) {
#define FINALIZE() }
#endif
#endif
#endif
#ifndef VERBOSE
#define VERBOSE 0
#endif
#ifndef LOG
static void writeMyLog(int level, const char *fmt, ...);
#define LOG(level, fmt, ...) do { if (VERBOSE >= level) { writeMyLog(level, fmt, ##__VA_ARGS__); } } while (0)
#endif
#ifndef DIE
static inline int *hasMyLog();
static inline void closeMyLog();
#define DIE(fmt,...) \
do { \
fprintf(stderr, COLOR_RED "Thread %d, DIE [%s:%d]: " COLOR_NORM fmt, \
MYTHREAD, __FILE__, __LINE__, ##__VA_ARGS__); \
fflush(stderr); \
if (*hasMyLog()) { \
LOG(0, COLOR_RED "DIE [%s:%d]: " COLOR_NORM fmt, \
__FILE__, __LINE__, ##__VA_ARGS__); \
closeMyLog(); \
} \
EXIT_FUNC(1); \
} while (0)
#endif
#ifndef CHECK_ERR
#define CHECK_ERR(cmd, val) \
do { \
int err; \
if ((err = cmd) != val) \
DIE("Thread %d, " #cmd " failed, error %s\n", MYTHREAD, strerror(err)); \
} while (0)
#endif
#ifdef __UPC__
// UPC
#include <upc.h>
#include "upc_compatiblity.h"
#pragma message "Using UPC CommonParallel.h"
#define BARRIER upc_barrier
#define NOW() UPC_TICKS_TO_SECS( UPC_TICKS_NOW() )
#else // NOT UPC
#ifdef MPI_VERSION
// // MPI, ensure mpi.h is loaded
#include <mpi.h>
#pragma message "Using MPI CommonParallel.h"
#define CHECK_MPI(x) CHECK_ERR(x, MPI_SUCCESS)
/* Lazily queries and caches the MPI world size (number of ranks). */
static inline int __get_THREADS() {
    static int size = -1;
    if (size < 0)
        MPI_Comm_size(MPI_COMM_WORLD, &size);
    return size;
}
/* Lazily queries and caches this process's MPI rank. */
static inline int __get_MYTHREAD() {
    static int rank = -1;
    if (rank < 0)
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    return rank;
}
#define BARRIER do { LOG(3, "Starting barrier at %d %s\n", __LINE__, __FILE__); CHECK_MPI(MPI_Barrier(MPI_COMM_WORLD)); } while (0)
#define NOW() MPI_Wtime()
#else
// OpenMP or fake it!
#ifdef _OPENMP
#include <omp.h>
#pragma message "Using OpenMP in CommonParallel.h"
// OpenMP, ensure omp.h is loaded
#else
#warning "No Parallel framework has been detected, assuming serial execution, making dummy OpenMP API functions in CommonParallel.h"
#define omp_get_thread_num() 0
#define omp_get_num_threads() 1
#define omp_get_max_threads() 1
#define omp_in_parallel() 0
static void omp_set_num_threads(int i) {}
static void omp_set_nested(int i) {}
#endif
/* OpenMP (or serial fallback) thread count. */
static inline int __get_THREADS() {
    return omp_get_num_threads();
}
/* OpenMP (or serial fallback) thread id. */
static inline int __get_MYTHREAD() {
    return omp_get_thread_num();
}
/* Logged OpenMP barrier; `file`/`line` identify the call site. */
static inline void __barrier(const char * file, int line) {
    LOG(1, "Barrier: %s:%d-%d\n", file, line, __get_MYTHREAD());
#pragma omp barrier
    /* BUG FIX: the argument list matched neither the order nor the count
     * of "%s:%d-%d" — an int was passed for %s (undefined behavior when
     * VERBOSE >= 2) plus one extra argument. */
    LOG(2, "Past Barrier %s:%d-%d\n", file, line, __get_MYTHREAD());
}
/* Wall-clock time in seconds with microsecond resolution, via
 * gettimeofday() (declared in <sys/time.h>). */
static inline double __get_seconds() {
    struct timeval tv;
    gettimeofday(&tv,NULL);
    return ((long long) tv.tv_usec + 1000000 * (long long) tv.tv_sec) / (double) 1000000.0;
}
#define BARRIER __barrier(__FILE__, __LINE__)
#define NOW() __get_seconds()
#endif
#ifdef THREADS
#error "UPC is incompatible with this flavor of this code"
#endif
#ifdef MYTHREAD
#error "UPC is incompatible with this flavor of this code"
#endif
#define THREADS (__get_THREADS())
#define MYTHREAD (__get_MYTHREAD())
#endif
#ifndef _MESSAGE_MACROS
#define _MESSAGE_MACROS
#define COLOR_NORM "\x1B[0m"
#define COLOR_RED "\x1B[91m"
#define COLOR_GREEN "\x1B[32m"
/* Returns a pointer to the flag/counter that records whether a log
 * stream has been installed for this process (0 = stderr only). The
 * logging helpers below read and increment it through this pointer. */
static inline int *hasMyLog() {
    static int _log = 0;
    return &_log;
}
typedef struct { FILE *f; } FILE2;
/* Returns this thread's FILE2 log-stream wrapper, lazily defaulting the
 * stream to stderr on first use. The storage strategy depends on the
 * parallel framework chosen at compile time. */
static inline FILE2 *_getMyLog() {
    int i;
#ifdef __UPC__
    /* UPC: one FILE2 element per thread in a shared array, each element
     * verified to live on its own thread. */
    static shared[1] FILE2 _mylog[THREADS];
    assert(upc_threadof(&(_mylog[MYTHREAD])) == MYTHREAD);
    assert(upc_threadof(_mylog + MYTHREAD) == MYTHREAD);
    FILE2 *f2 = (FILE2*) &(_mylog[MYTHREAD].f);
    assert(f2 != NULL);
    if (f2->f == NULL) {
        f2->f = stderr;
        (*hasMyLog())++;
    }
    return f2;
#else
    static FILE2 *_mylog = NULL;
#ifdef MPI_VERSION
    /* MPI: a single process-local heap slot, allocated on first use. */
    if(_mylog == NULL) {
        _mylog = calloc(1, sizeof(FILE2));
        _mylog->f = stderr;
        (*hasMyLog())++;
    }
    assert(_mylog->f != NULL);
    return _mylog;
#else
    /* OpenMP: a threadprivate copy of the static pointer per thread.
     * NOTE(review): unlike the UPC/MPI branches, this path never bumps
     * *hasMyLog() — confirm whether that is intentional. */
#pragma omp threadprivate(_mylog)
    if (_mylog == NULL) {
        _mylog = calloc(1, sizeof(FILE2));
        _mylog->f = stderr;
    }
    assert(_mylog->f != NULL);
    return _mylog;
#endif
#endif
}
/* Convenience accessor: the current thread's log stream (never NULL). */
static inline FILE *getMyLog() {
    FILE2 *wrapper = _getMyLog();
    assert(wrapper != NULL);
    FILE *stream = (FILE *) wrapper->f;
    assert(stream != NULL);
    return stream;
}
/* Closes the active log file (if one was installed) and falls back to
 * stderr. stderr/stdout themselves are never closed. */
static inline void closeMyLog() {
    if (!*hasMyLog()) return;
    FILE *stream = getMyLog();
    if (stream == stderr || stream == stdout) return;
    fclose(stream);
    _getMyLog()->f = stderr;
}
/* Opens `myfile` for writing and installs it as this thread's log
 * stream; dies if the file cannot be opened. Ends with a BARRIER so all
 * threads switch their logs together. */
static inline void _setMyLog(const char *myfile) {
    FILE2 *mylog = _getMyLog();
    assert(mylog != NULL);
    mylog->f = fopen(myfile, "w+");
    if (mylog->f == NULL) DIE("Could not open %s for writing to a log!\n", myfile);
    assert(((FILE*) mylog->f) == getMyLog());
    BARRIER;
}
/* Derives a per-thread log filename "<prefix>.<rank>of<n>" and opens it.
 * NOTE(review): "%6d" pads with spaces, so the filename contains literal
 * blanks (e.g. "log.     3of     8"); "%06d" was probably intended —
 * confirm before changing. */
static inline void setMyLog(const char *prefix) {
    char myfile[384];
    snprintf(myfile, 384, "%s.%6dof%6d", prefix, MYTHREAD, THREADS);
    _setMyLog(myfile);
}
/* Formats and writes one log line to this thread's log stream, prefixed
 * with the thread id, a level tag (0 = ALL, 1 = INFO, >=2 = DEBUG) and a
 * timestamp. The "%.19s" precision keeps only the "Www Mmm dd hh:mm:ss"
 * part of asctime(), dropping the year and its trailing newline. */
static void writeMyLog(int level, const char *fmt, ...) {
    time_t rawtime; struct tm *timeinfo;
    time( &rawtime ); timeinfo = localtime( &rawtime );
    char newfmt[1024];
    /* Build a combined format string, then forward the caller's varargs. */
    snprintf(newfmt, 1024, "Thread %d [%s %.19s]: %s", MYTHREAD,
            level == 0 ? "ALL" : (level == 1 ? "INFO" : "DEBUG"), asctime(timeinfo), fmt);
    va_list args;
    va_start(args, fmt);
    vfprintf(getMyLog(), newfmt, args);
    va_end(args);
}
#define SLOG(level, fmt, ...) if (MYTHREAD == 0) LOG(level, fmt, ##__VA_ARGS__)
/* BUG FIX: the fprintf statement was missing its terminating semicolon,
 * so every expansion of LOG2 failed to compile. */
#define LOG2(level, fmt, ...) do { fprintf(getMyLog(), fmt, ##__VA_ARGS__); } while (0)
#define LOG_FLUSH(level, fmt, ...) do { LOG(level, fmt, ##__VA_ARGS__); fflush(getMyLog()); } while (0)
/* BUG FIX: "__VA_ARS__" was a typo for "__VA_ARGS__", breaking every use
 * of SLOG_FLUSH at compile time. */
#define SLOG_FLUSH(level, fmt, ...) do { SLOG(level, fmt, ##__VA_ARGS__); fflush(getMyLog()); } while (0)
#define SDIE(fmt,...) \
do { \
if (!MYTHREAD) { \
fprintf(stderr, COLOR_RED "Thread %d, DIE [%s:%d]: " fmt COLOR_NORM, \
MYTHREAD, __FILE__, __LINE__, ##__VA_ARGS__); \
fflush(stderr); \
} \
if (*hasMyLog()) { \
LOG(0, COLOR_RED "DIE [%s:%d]: " COLOR_NORM fmt, \
__FILE__, __LINE__, ##__VA_ARGS__); \
closeMyLog(); \
} \
EXIT_FUNC(1); \
} while (0)
#define WARN(fmt,...) \
do { \
fprintf(stderr, COLOR_RED "Thread %d, WARN [%s:%d]: " fmt COLOR_NORM, \
MYTHREAD, __FILE__, __LINE__, ##__VA_ARGS__); \
} while (0)
#endif // _MESSAGE_MACROS
#if defined (__cplusplus)
}
#endif
#endif // COMMON_PARALLEL_H_
|
NearestNeighbor.h | #pragma once
#include <flann/flann.hpp>
#include "Eigen.h"
// A correspondence between one query point and one target point.
struct Match {
    int idx;      // index into the target point set; -1 when no match within range
    float weight; // 1.f for an accepted match, 0.f otherwise (see getClosestPoint)
};
// Abstract interface for nearest-neighbor matching between a fixed
// target point cloud (buildIndex) and a set of query points
// (queryMatches).
class NearestNeighborSearch {
public:
    virtual ~NearestNeighborSearch() {}

    // Matches farther away than maxDistance are rejected (idx == -1).
    virtual void setMatchingMaxDistance(float maxDistance) {
        m_maxDistance = maxDistance;
    }

    // Prepares the search structure over the target points.
    virtual void buildIndex(const std::vector<Eigen::Vector3f>& targetPoints) = 0;

    // Returns one Match per query point, in input order.
    virtual std::vector<Match> queryMatches(const std::vector<Vector3f>& transformedPoints) = 0;

protected:
    float m_maxDistance; // maximum accepted match distance (same units as the points)

    NearestNeighborSearch() : m_maxDistance{ 0.005f } {}
};
/**
* Brute-force nearest neighbor search.
*/
class NearestNeighborSearchBruteForce : public NearestNeighborSearch {
public:
NearestNeighborSearchBruteForce() : NearestNeighborSearch() {}
void buildIndex(const std::vector<Eigen::Vector3f>& targetPoints) {
m_points = targetPoints;
}
std::vector<Match> queryMatches(const std::vector<Vector3f>& transformedPoints) {
const unsigned nMatches = transformedPoints.size();
std::vector<Match> matches(nMatches);
const unsigned nTargetPoints = m_points.size();
std::cout << "nMatches: " << nMatches << std::endl;
std::cout << "nTargetPoints: " << nTargetPoints << std::endl;
#pragma omp parallel for
for (int i = 0; i < (int)nMatches; i++) {
matches[i] = getClosestPoint(transformedPoints[i]);
}
return matches;
}
private:
std::vector<Eigen::Vector3f> m_points;
Match getClosestPoint(const Vector3f& p) {
int idx = -1;
float minDist = std::numeric_limits<float>::max();
for (unsigned int i = 0; i < m_points.size(); ++i) {
float dist = (p - m_points[i]).norm();
if (minDist > dist) {
idx = i;
minDist = dist;
}
}
if (minDist <= m_maxDistance)
return Match{ idx, 1.f };
else
return Match{ -1, 0.f };
}
};
/**
* Nearest neighbor search using FLANN.
*/
class NearestNeighborSearchFlann : public NearestNeighborSearch {
public:
NearestNeighborSearchFlann() :
NearestNeighborSearch(),
m_nTrees{ 1 },
m_index{ nullptr },
m_flatPoints{ nullptr }
{ }
~NearestNeighborSearchFlann() {
if (m_index) {
delete m_flatPoints;
delete m_index;
m_flatPoints = nullptr;
m_index = nullptr;
}
}
void buildIndex(const std::vector<Eigen::Vector3f>& targetPoints) {
std::cout << "Initializing FLANN index with " << targetPoints.size() << " points." << std::endl;
// FLANN requires that all the points be flat. Therefore we copy the points to a separate flat array.
m_flatPoints = new float[targetPoints.size() * 3];
for (size_t pointIndex = 0; pointIndex < targetPoints.size(); pointIndex++) {
for (size_t dim = 0; dim < 3; dim++) {
m_flatPoints[pointIndex * 3 + dim] = targetPoints[pointIndex][dim];
}
}
flann::Matrix<float> dataset(m_flatPoints, targetPoints.size(), 3);
// Building the index takes some time.
m_index = new flann::Index<flann::L2<float>>(dataset, flann::KDTreeIndexParams(m_nTrees));
m_index->buildIndex();
std::cout << "FLANN index created." << std::endl;
}
std::vector<Match> queryMatches(const std::vector<Vector3f>& transformedPoints) {
if (!m_index) {
std::cout << "FLANN index needs to be build before querying any matches." << std::endl;
return {};
}
// FLANN requires that all the points be flat. Therefore we copy the points to a separate flat array.
float* queryPoints = new float[transformedPoints.size() * 3];
for (size_t pointIndex = 0; pointIndex < transformedPoints.size(); pointIndex++) {
for (size_t dim = 0; dim < 3; dim++) {
queryPoints[pointIndex * 3 + dim] = transformedPoints[pointIndex][dim];
}
}
flann::Matrix<float> query(queryPoints, transformedPoints.size(), 3);
flann::Matrix<int> indices(new int[query.rows * 1], query.rows, 1);
flann::Matrix<float> distances(new float[query.rows * 1], query.rows, 1);
// Do a knn search, searching for 1 nearest point and using 16 checks.
flann::SearchParams searchParams{ 16 };
searchParams.cores = 0;
m_index->knnSearch(query, indices, distances, 1, searchParams);
// Filter the matches.
const unsigned nMatches = transformedPoints.size();
std::vector<Match> matches;
matches.reserve(nMatches);
for (int i = 0; i < (int)nMatches; ++i) {
if (*distances[i] <= m_maxDistance)
matches.push_back(Match{ *indices[i], 1.f });
else
matches.push_back(Match{ -1, 0.f });
}
// Release the memory.
delete[] query.ptr();
delete[] indices.ptr();
delete[] distances.ptr();
return matches;
}
private:
int m_nTrees;
flann::Index<flann::L2<float>>* m_index;
float* m_flatPoints;
};
|
retinaface.h | #ifndef _RETINAFACE_H_
#define _RETINAFACE_H_
#include "../detecter.h"
#include "ncnn/net.h"
/*
namespace mirror {
using ANCHORS = std::vector<cv::Rect>;
class RetinaFace : public Detecter {
public:
RetinaFace();
~RetinaFace();
int LoadModel(const char* root_path);
int DetectFace(const cv::Mat& img_src, std::vector<FaceInfo>* faces);
private:
ncnn::Net* retina_net_;
std::vector<ANCHORS> anchors_generated_;
bool initialized_;
const int RPNs_[3] = { 32, 16, 8 };
const cv::Size inputSize_ = { 300, 300 };
const float iouThreshold_ = 0.4f;
const float scoreThreshold_ = 0.8f;
};
}
*/
namespace mirror {

// Candidate anchor rectangles for one pyramid level.
using ANCHORS = std::vector<cv::Rect>;

// RetinaFace face detector backed by an ncnn network.
// NOTE(review): the class exposes two parallel APIs — the Detecter-style
// LoadModel()/DetectFace() pair and the standalone Init()/Detect() pair;
// confirm which one callers actually use before consolidating.
class RetinaFace : public Detecter {
public:
    RetinaFace();
    // Loads the ncnn .param / .bin model pair.
    void Init(const std::string &model_param, const std::string &model_bin);
    // `retinaface` selects the 3-level anchor layout (see create_anchor_retinaface).
    RetinaFace(const std::string &model_param, const std::string &model_bin, bool retinaface = false);
    inline void Release();
    // In-place non-maximum suppression on candidate boxes.
    void nms(std::vector<bbox> &input_boxes, float NMS_THRESH);
    // Runs the network on img_src and appends detections to boxes.
    void Detect(const cv::Mat & img_src, std::vector<bbox>& boxes);
    // Generates prior boxes for a w x h input (4-level variant).
    void create_anchor(std::vector<box> &anchor, int w, int h);
    // Generates prior boxes for a w x h input (3-level retinaface variant).
    void create_anchor_retinaface(std::vector<box> &anchor, int w, int h);
    inline void SetDefaultParams();
    // Sort predicate used to order candidates before NMS.
    static inline bool cmp(bbox a, bbox b);
    int LoadModel(const char* root_path);
    int DetectFace(const cv::Mat& img_src, std::vector<bbox>& faces);
    ~RetinaFace();

public:
    float _nms=0.5;                           // NMS IOU threshold
    float _threshold=0.5;                     // detection score threshold
    float _mean_val[3]={104.f, 117.f, 123.f}; // per-channel mean subtracted from the input
    bool _retinaface;                         // true: use the 3-level anchor layout
    ncnn::Net* retina_net_;                   // underlying ncnn network
    // ncnn::Net *Net;

private:
    std::vector<ANCHORS> anchors_generated_;
    bool initialized_;
    const int RPNs_[3] = { 32, 16, 8 };
    const cv::Size inputSize_ = { 300, 300 };
    const float iouThreshold_ = 0.4f;
    const float scoreThreshold_ = 0.8f;
};

}
/*
class Detectorss
{
public:
Detectorss();
void Init(const std::string &model_param, const std::string &model_bin);
Detectorss(const std::string &model_param, const std::string &model_bin, bool retinaface = false);
inline void Release();
void nms(std::vector<bbox> &input_boxes, float NMS_THRESH);
void Detect(const cv::Mat& img_src, std::vector<bbox>& boxes);
void create_anchor(std::vector<box> &anchor, int w, int h);
void create_anchor_retinaface(std::vector<box> &anchor, int w, int h);
inline void SetDefaultParams();
static inline bool cmp(bbox a, bbox b);
~Detectorss();
public:
float _nms;
float _threshold;
float _mean_val[3];
bool _retinaface;
ncnn::Net *Net;
};
RetinaFace::~RetinaFace(){
Release();
}
inline void Detectorss::Release(){
if (Net != nullptr)
{
delete Net;
Net = nullptr;
}
}
Detectorss::Detectorss(const std::string &model_param, const std::string &model_bin, bool retinaface):
_nms(0.5),
_threshold(0.5),
_mean_val{104.f, 117.f, 123.f},
_retinaface(retinaface),
Net(new ncnn::Net())
{
Init(model_param, model_bin);
}
inline bool Detectorss::cmp(bbox a, bbox b) {
if (a.s > b.s)
return true;
return false;
}
inline void Detectorss::SetDefaultParams(){
_nms = 0.4;
_threshold = 0.6;
_mean_val[0] = 104;
_mean_val[1] = 117;
_mean_val[2] = 123;
Net = nullptr;
}
void Detectorss::Init(const std::string &model_param, const std::string &model_bin)
{
int ret = Net->load_param(model_param.c_str());
ret = Net->load_model(model_bin.c_str());
}
void Detectorss::create_anchor(std::vector<box> &anchor, int w, int h)
{
// anchor.reserve(num_boxes);
anchor.clear();
std::vector<std::vector<int> > feature_map(4), min_sizes(4);
float steps[] = {8, 16, 32, 64};
for (int i = 0; i < feature_map.size(); ++i) {
feature_map[i].push_back(ceil(h/steps[i]));
feature_map[i].push_back(ceil(w/steps[i]));
}
std::vector<int> minsize1 = {10, 16, 24};
min_sizes[0] = minsize1;
std::vector<int> minsize2 = {32, 48};
min_sizes[1] = minsize2;
std::vector<int> minsize3 = {64, 96};
min_sizes[2] = minsize3;
std::vector<int> minsize4 = {128, 192, 256};
min_sizes[3] = minsize4;
for (int k = 0; k < feature_map.size(); ++k)
{
std::vector<int> min_size = min_sizes[k];
for (int i = 0; i < feature_map[k][0]; ++i)
{
for (int j = 0; j < feature_map[k][1]; ++j)
{
for (int l = 0; l < min_size.size(); ++l)
{
float s_kx = min_size[l]*1.0/w;
float s_ky = min_size[l]*1.0/h;
float cx = (j + 0.5) * steps[k]/w;
float cy = (i + 0.5) * steps[k]/h;
box axil = {cx, cy, s_kx, s_ky};
anchor.push_back(axil);
}
}
}
}
}
void Detectorss::create_anchor_retinaface(std::vector<box> &anchor, int w, int h)
{
// anchor.reserve(num_boxes);
anchor.clear();
std::vector<std::vector<int> > feature_map(3), min_sizes(3);
float steps[] = {8, 16, 32};
for (int i = 0; i < feature_map.size(); ++i) {
feature_map[i].push_back(ceil(h/steps[i]));
feature_map[i].push_back(ceil(w/steps[i]));
}
std::vector<int> minsize1 = {10, 20};
min_sizes[0] = minsize1;
std::vector<int> minsize2 = {32, 64};
min_sizes[1] = minsize2;
std::vector<int> minsize3 = {128, 256};
min_sizes[2] = minsize3;
// TODO:
for (int k = 0; k < feature_map.size(); ++k) // 3
{
std::vector<int> min_size = min_sizes[k];
for (int i = 0; i < feature_map[k][0]; ++i)
{
for (int j = 0; j < feature_map[k][1]; ++j)
{
for (int l = 0; l < min_size.size(); ++l)
{
float s_kx = min_size[l]*1.0/w;
float s_ky = min_size[l]*1.0/h;
float cx = (j + 0.5) * steps[k]/w;
float cy = (i + 0.5) * steps[k]/h;
box axil = {cx, cy, s_kx, s_ky};
anchor.push_back(axil);
}
}
}
}
}
void Detectorss::nms(std::vector<bbox> &input_boxes, float NMS_THRESH)
{
std::vector<float>vArea(input_boxes.size());
for (int i = 0; i < int(input_boxes.size()); ++i)
{
vArea[i] = (input_boxes.at(i).x2 - input_boxes.at(i).x1 + 1)
* (input_boxes.at(i).y2 - input_boxes.at(i).y1 + 1);
}
for (int i = 0; i < int(input_boxes.size()); ++i)
{
for (int j = i + 1; j < int(input_boxes.size());)
{
float xx1 = std::max(input_boxes[i].x1, input_boxes[j].x1);
float yy1 = std::max(input_boxes[i].y1, input_boxes[j].y1);
float xx2 = std::min(input_boxes[i].x2, input_boxes[j].x2);
float yy2 = std::min(input_boxes[i].y2, input_boxes[j].y2);
float w = std::max(float(0), xx2 - xx1 + 1);
float h = std::max(float(0), yy2 - yy1 + 1);
float inter = w * h;
float ovr = inter / (vArea[i] + vArea[j] - inter);
if (ovr >= NMS_THRESH)
{
input_boxes.erase(input_boxes.begin() + j);
vArea.erase(vArea.begin() + j);
}
else
{
j++;
}
}
}
}
void Detectorss::Detect(const cv::Mat& img_src, std::vector<bbox>& boxes)
{
ncnn::Mat in = ncnn::Mat::from_pixels_resize(img_src.data, ncnn::Mat::PIXEL_BGR, img_src.cols, img_src.rows, img_src.cols, img_src.rows);
in.substract_mean_normalize(_mean_val, 0);
ncnn::Extractor ex = Net->create_extractor();
ex.set_light_mode(true);
ex.set_num_threads(4);
ex.input(0, in);
ncnn::Mat out, out1, out2;
// loc
ex.extract("output0", out);
// class
ex.extract("530", out1);
//landmark
ex.extract("529", out2);
std::vector<box> anchor;
if (_retinaface)
create_anchor_retinaface(anchor, img_src.cols, img_src.rows);
else
create_anchor(anchor, img_src.cols, img_src.rows);
// TODO: opencv dnn extract
std::vector<bbox> total_box;
float *ptr = out.channel(0);
float *ptr1 = out1.channel(0);
float *landms = out2.channel(0);
// #pragma omp parallel for num_threads(2)
for (int i = 0; i < anchor.size(); ++i)
{
if (*(ptr1+1) > _threshold)
{
box tmp = anchor[i];
box tmp1;
bbox result;
// loc and conf
tmp1.cx = tmp.cx + *ptr * 0.1 * tmp.sx;
tmp1.cy = tmp.cy + *(ptr+1) * 0.1 * tmp.sy;
tmp1.sx = tmp.sx * exp(*(ptr+2) * 0.2);
tmp1.sy = tmp.sy * exp(*(ptr+3) * 0.2);
result.x1 = (tmp1.cx - tmp1.sx/2) * in.w;
if (result.x1<0)
result.x1 = 0;
result.y1 = (tmp1.cy - tmp1.sy/2) * in.h;
if (result.y1<0)
result.y1 = 0;
result.x2 = (tmp1.cx + tmp1.sx/2) * in.w;
if (result.x2>in.w)
result.x2 = in.w;
result.y2 = (tmp1.cy + tmp1.sy/2)* in.h;
if (result.y2>in.h)
result.y2 = in.h;
result.s = *(ptr1 + 1);
// landmark
for (int j = 0; j < 5; ++j)
{
result.point[j]._x =( tmp.cx + *(landms + (j<<1)) * 0.1 * tmp.sx ) * in.w;
result.point[j]._y =( tmp.cy + *(landms + (j<<1) + 1) * 0.1 * tmp.sy ) * in.h;
}
total_box.push_back(result);
}
ptr += 4;
ptr1 += 2;
landms += 10;
}
// std::cout<<'----------------'<<total_box<<std::endl;
std::sort(total_box.begin(), total_box.end(), cmp);
nms(total_box, _nms);
//select the max box
if (total_box.size()>0){
std::vector<int> area_boxes;
for(int ii=0; ii < total_box.size(); ++ii){
int area_box = (total_box[ii].x2 - total_box[ii].x1)*
(total_box[ii].y2 - total_box[ii].y1);
area_boxes.push_back(area_box);
}
cv::Point maxloc;
std:double maxvaule;
cv::minMaxLoc(area_boxes, NULL, &maxvaule, NULL, &maxloc);
boxes.push_back(total_box[maxloc.x]);
} //Created on Sep 09, 2020 by Qiao
}
*/
#endif // !_RETINAFACE_H_
|
hash_mult.h | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <immintrin.h>
#include <algorithm>
#ifdef KNL_EXE
#include <zmmintrin.h>
#else
#include <x86intrin.h>
#endif
#include "utility.h"
#include "CSR.h"
#include "BIN.h"
#define VECTORIZE
/* SpGEMM Specific Parameters */
#define HASH_SCAL 107 // Set disjoint number to hash table size (=2^n)
#ifdef KNL_EXE
#define MIN_HT_S 16 // minimum hash table size per row in symbolic phase
#define MIN_HT_N 16 // minimum hash table size per row in numeric phase
#define VEC_LENGTH 16
#define VEC_LENGTH_BIT 4
#define VEC_LENGTH_LONG 8
#define VEC_LENGTH_LONG_BIT 3
#else
#define MIN_HT_S 8 // minimum hash table size per row in symbolic phase
#define MIN_HT_N 8 // minimum hash table size per row in numeric phase
#define VEC_LENGTH 8
#define VEC_LENGTH_BIT 3
#define VEC_LENGTH_LONG 4
#define VEC_LENGTH_LONG_BIT 2
#endif
/*
* Symbolic phase for Hash SpGEMM.
*/
template <class IT, class NT>
inline void hash_symbolic_kernel(const IT *arpt, const IT *acol, const IT *brpt, const IT *bcol, BIN<IT, NT> &bin)
{
    // Counts, for every row i of C = A * B (CSR arrays arpt/acol for A and
    // brpt/bcol for B), the number of distinct column indices produced, and
    // writes the count to bin.row_nz[i]. Rows are partitioned among OpenMP
    // threads via bin.rows_offset; each thread probes its own hash table from
    // bin.local_hash_table_id, so no locking is needed.
#pragma omp parallel
    {
        IT tid = omp_get_thread_num();
        IT start_row = bin.rows_offset[tid];
        IT end_row = bin.rows_offset[tid + 1];
        IT *check = bin.local_hash_table_id[tid]; // thread-local hash table of column ids; -1 marks an empty slot
        for (IT i = start_row; i < end_row; ++i) {
            IT nz = 0;
            IT bid = bin.bin_id[i]; // bin id 0 means this row produces no entries
            if (bid > 0) {
                IT ht_size = MIN_HT_S << (bid - 1); // determine hash table size for i-th row (always a power of two)
                for (IT j = 0; j < ht_size; ++j) { // initialize hash table
                    check[j] = -1;
                }
                for (IT j = arpt[i]; j < arpt[i + 1]; ++j) {
                    IT t_acol = acol[j];
                    for (IT k = brpt[t_acol]; k < brpt[t_acol + 1]; ++k) {
                        IT key = bcol[k]; // candidate column index of C(i, :)
                        IT hash = (key * HASH_SCAL) & (ht_size - 1);
                        while (1) { // Loop for hash probing
                            if (check[hash] == key) { // if the key is already inserted, it's ok
                                break;
                            }
                            else if (check[hash] == -1) { // if the key has not been inserted yet, then it's added.
                                check[hash] = key;
                                nz++;
                                break;
                            }
                            else { // linear probing: check next entry
                                hash = (hash + 1) & (ht_size - 1); //hash = (hash + 1) % ht_size
                            }
                        }
                    }
                }
            }
            bin.row_nz[i] = nz;
        }
    }
}
#ifdef KNL_EXE
/*
* Symbolic phase for Hash Vector SpGEMM
* This function is optimized for 32-bit integer with AVX-512.
*/
template <class NT>
inline void hash_symbolic_vec_kernel(const int *arpt, const int *acol, const int *brpt, const int *bcol, BIN<int, NT> &bin)
{
    // Counts the non-zeros of each row of C = A * B into bin.row_nz.
    // The hash table is organized in chunks of VEC_LENGTH entries; one chunk is
    // probed per step with a single 512-bit compare, and linear probing advances
    // chunk by chunk. NOTE: `hash` below is a pre-shifted ELEMENT offset.
#ifdef VECTORIZE
    const __m512i init_m = _mm512_set1_epi32(-1); // pattern of empty slots
#endif
#pragma omp parallel
    {
        int tid = omp_get_thread_num();
        int start_row = bin.rows_offset[tid];
        int end_row = bin.rows_offset[tid + 1];
        int *check = bin.local_hash_table_id[tid]; // thread-local hash table; -1 marks an empty slot
        for (int i = start_row; i < end_row; ++i) {
#ifdef VECTORIZE
            __m512i key_m, check_m;
            __mmask16 mask_m;
#endif
            int nz = 0;
            int bid = bin.bin_id[i];
            if (bid > 0) {
                int table_size = MIN_HT_S << (bid - 1); // the number of entries per table
                int ht_size = table_size >> VEC_LENGTH_BIT; // the number of chunks (1 chunk = VEC_LENGTH elements)
                for (int j = 0; j < table_size; ++j) {
                    check[j] = -1; // initialize hash table
                }
                for (int j = arpt[i]; j < arpt[i + 1]; ++j) {
                    int t_acol = acol[j];
                    for (int k = brpt[t_acol]; k < brpt[t_acol + 1]; ++k) {
                        int key = bcol[k];
                        // hash is the ELEMENT offset of the chunk to probe first
                        int hash = ((key * HASH_SCAL) & (ht_size - 1)) << VEC_LENGTH_BIT;
#ifdef VECTORIZE
                        key_m = _mm512_set1_epi32(key);
#endif
                        while (1) { // Loop for hash probing
                            // check whether the key is in hash table.
#ifdef VECTORIZE
                            check_m = _mm512_load_epi32(check + hash);
                            mask_m = _mm512_cmp_epi32_mask(key_m, check_m, _MM_CMPINT_EQ);
                            if (mask_m != 0) {
                                break;
                            }
#else
                            bool flag = false;
#pragma vector
                            for (int l = 0; l < VEC_LENGTH; ++l) {
                                // BUGFIX: hash is already an element offset; the previous
                                // extra << VEC_LENGTH_BIT read the wrong slots.
                                if (check[hash + l] == key) {
                                    flag = true;
                                }
                            }
                            if (flag) {
                                break;
                            }
#endif
                            else {
                                // If the entry with same key cannot be found, check whether the chunk is filled or not
                                int cur_nz;
#ifdef VECTORIZE
                                // BUGFIX: statement was missing its terminating semicolon.
                                mask_m = _mm512_cmp_epi32_mask(check_m, init_m, _MM_CMPINT_NE);
                                cur_nz = _popcnt32(mask_m); // number of occupied slots in the chunk
#else
                                cur_nz = VEC_LENGTH;
#pragma vector
                                for (int l = VEC_LENGTH - 1; l >= 0; --l) {
                                    if (check[hash + l] == -1) { // BUGFIX: same double-shift removed
                                        cur_nz = l; // lowest empty index == occupied count (chunks fill left to right)
                                    }
                                }
#endif
                                if (cur_nz < VEC_LENGTH) { // if it is not filled, push the entry to the table
                                    check[hash + cur_nz] = key;
                                    nz++;
                                    break;
                                }
                                else { // if it is filled, check next chunk (linear probing)
                                    hash = (hash + VEC_LENGTH) & (table_size - 1);
                                }
                            }
                        }
                    }
                }
            }
            bin.row_nz[i] = nz;
        }
    }
}
/*
* Symbolic phase for Hash Vector SpGEMM
* This function is optimized for 64-bit integer with AVX-512.
*/
template <class NT>
inline void hash_symbolic_vec_kernel(const long long int *arpt, const long long int *acol, const long long int *brpt, const long long int *bcol, BIN<long long int, NT> &bin)
{
    // 64-bit index variant: chunks hold VEC_LENGTH_LONG entries; one chunk is
    // probed per step with a 512-bit compare. `hash` is a pre-shifted ELEMENT offset.
#ifdef VECTORIZE
    const __m512i init_m = _mm512_set1_epi64(-1); // pattern of empty slots
#endif
#pragma omp parallel
    {
        long long int tid = omp_get_thread_num();
        long long int start_row = bin.rows_offset[tid];
        long long int end_row = bin.rows_offset[tid + 1];
        long long int *check = bin.local_hash_table_id[tid]; // thread-local hash table; -1 marks an empty slot
        for (long long int i = start_row; i < end_row; ++i) {
#ifdef VECTORIZE
            __m512i key_m, check_m;
            __mmask8 mask_m;
#endif
            long long int nz = 0;
            long long int bid = bin.bin_id[i];
            if (bid > 0) {
                long long int table_size = MIN_HT_S << (bid - 1); // the number of entries per table
                long long int ht_size = table_size >> VEC_LENGTH_LONG_BIT; // the number of chunks (1 chunk = VEC_LENGTH_LONG elements)
                for (long long int j = 0; j < table_size; ++j) {
                    check[j] = -1; // initialize hash table
                }
                for (long long int j = arpt[i]; j < arpt[i + 1]; ++j) {
                    long long int t_acol = acol[j];
                    for (long long int k = brpt[t_acol]; k < brpt[t_acol + 1]; ++k) {
                        long long int key = bcol[k];
                        // hash is the ELEMENT offset of the chunk to probe first
                        long long int hash = ((key * HASH_SCAL) & (ht_size - 1)) << VEC_LENGTH_LONG_BIT;
#ifdef VECTORIZE
                        key_m = _mm512_set1_epi64(key);
#endif
                        while (1) { // loop for hash probing
                            // check whether the key is in hash table.
#ifdef VECTORIZE
                            check_m = _mm512_load_epi64(check + hash);
                            mask_m = _mm512_cmp_epi64_mask(key_m, check_m, _MM_CMPINT_EQ);
                            if (mask_m != 0) {
                                break;
                            }
#else
                            bool flag = false;
#pragma vector
                            for (int l = 0; l < VEC_LENGTH_LONG; ++l) {
                                // BUGFIX: hash is already an element offset; the previous
                                // extra << VEC_LENGTH_LONG_BIT read the wrong slots.
                                if (check[hash + l] == key) {
                                    flag = true;
                                }
                            }
                            if (flag) {
                                break;
                            }
#endif
                            else {
                                // If the entry with same key cannot be found, check whether the chunk is filled or not
                                long long int cur_nz;
#ifdef VECTORIZE
                                mask_m = _mm512_cmp_epi64_mask(check_m, init_m, _MM_CMPINT_NE);
                                cur_nz = _popcnt32(mask_m); // number of occupied slots in the chunk
#else
                                cur_nz = VEC_LENGTH_LONG;
#pragma vector
                                for (int l = VEC_LENGTH_LONG - 1; l >= 0; --l) {
                                    if (check[hash + l] == -1) { // BUGFIX: same double-shift removed
                                        cur_nz = l;
                                    }
                                }
#endif
                                if (cur_nz < VEC_LENGTH_LONG) { // if it is not filled, push the entry to the table
                                    check[hash + cur_nz] = key;
                                    nz++;
                                    break;
                                }
                                else { // if it is filled, check next chunk (linear probing)
                                    hash = (hash + VEC_LENGTH_LONG) & (table_size - 1);
                                }
                            }
                        }
                    }
                }
            }
            bin.row_nz[i] = nz;
        }
    }
}
#else
/*
 * Symbolic phase for Hash Vector SpGEMM
 * This function is optimized for 32-bit integer with AVX2.
 * Counts the non-zeros of each row of C = A * B into bin.row_nz. The hash
 * table is split into chunks of VEC_LENGTH int entries; a whole chunk is
 * probed per iteration with one 256-bit compare, and linear probing advances
 * chunk by chunk. Here `hash` is a CHUNK index, so element accesses shift it
 * left by VEC_LENGTH_BIT first (unlike the AVX-512 variants above).
 */
template <class NT>
inline void hash_symbolic_vec_kernel(const int *arpt, const int *acol, const int *brpt, const int *bcol, BIN<int, NT> &bin)
{
#ifdef VECTORIZE
    const __m256i init_m = _mm256_set1_epi32(-1); // pattern of empty slots
    const __m256i true_m = _mm256_set1_epi32(0xffffffff); // all-lanes-enabled mask for maskload
#endif
#pragma omp parallel
    {
        int tid = omp_get_thread_num();
        int start_row = bin.rows_offset[tid];
        int end_row = bin.rows_offset[tid + 1];
        int *check = bin.local_hash_table_id[tid]; // thread-local hash table; -1 marks an empty slot
        for (int i = start_row; i < end_row; ++i) {
#ifdef VECTORIZE
            __m256i key_m, check_m;
            __m256i mask_m;
            int mask;
#endif
            int nz = 0;
            int bid = bin.bin_id[i];
            if (bid > 0) {
                int table_size = MIN_HT_S << (bid - 1); // the number of entries per table
                int ht_size = table_size >> VEC_LENGTH_BIT; // the number of chunks (1 chunk = VEC_LENGTH elements)
                for (int j = 0; j < table_size; ++j) {
                    check[j] = -1; // initialize hash table
                }
                for (int j = arpt[i]; j < arpt[i + 1]; ++j) {
                    int t_acol = acol[j];
                    for (int k = brpt[t_acol]; k < brpt[t_acol + 1]; ++k) {
                        int key = bcol[k];
                        int hash = (key * HASH_SCAL) & (ht_size - 1); // chunk index
#ifdef VECTORIZE
                        key_m = _mm256_set1_epi32(key);
#endif
                        while (1) { // Loop for hash probing
                            // check whether the key is in hash table.
#ifdef VECTORIZE
                            check_m = _mm256_maskload_epi32(check + (hash << VEC_LENGTH_BIT), true_m);
                            mask_m = _mm256_cmpeq_epi32(key_m, check_m);
                            mask = _mm256_movemask_epi8(mask_m);
                            if (mask != 0) { // key already present in this chunk
                                break;
                            }
#else
                            bool flag = false;
#pragma simd
                            for (int l = 0; l < VEC_LENGTH; ++l) {
                                if (check[(hash << VEC_LENGTH_BIT) + l] == key) {
                                    flag = true;
                                }
                            }
                            if (flag) {
                                break;
                            }
#endif
                            else {
                                // If the entry with same key cannot be found, check whether the chunk is filled or not
                                int cur_nz;
#ifdef VECTORIZE
                                mask_m = _mm256_cmpeq_epi32(check_m, init_m);
                                mask = _mm256_movemask_epi8(mask_m);
                                // movemask yields 4 bits per 32-bit lane, hence >> 2:
                                // cur_nz = number of occupied entries in the chunk
                                cur_nz = (32 - _popcnt32(mask)) >> 2;
#else
                                cur_nz = VEC_LENGTH;
#pragma simd
                                for (int l = VEC_LENGTH - 1; l >= 0; --l) {
                                    if (check[(hash << VEC_LENGTH_BIT) + l] == -1) {
                                        cur_nz = l; // lowest empty index == occupied count (chunks fill left to right)
                                    }
                                }
#endif
                                if (cur_nz < VEC_LENGTH) { //if it is not filled, push the entry to the table
                                    check[(hash << VEC_LENGTH_BIT) + cur_nz] = key;
                                    nz++;
                                    break;
                                }
                                else { // if is filled, check next chunk (linear probing)
                                    hash = (hash + 1) & (ht_size - 1);
                                }
                            }
                        }
                    }
                }
            }
            bin.row_nz[i] = nz;
        }
    }
}
/*
 * Symbolic phase for Hash Vector SpGEMM, optimized for 64-bit integer with
 * AVX2 (one chunk = VEC_LENGTH_LONG 64-bit entries). Counts the non-zeros of
 * each row of C = A * B into bin.row_nz. `hash` is a CHUNK index, shifted
 * left by VEC_LENGTH_LONG_BIT for element accesses.
 */
template <class NT>
inline void hash_symbolic_vec_kernel(const long long int *arpt, const long long int *acol, const long long int *brpt, const long long int *bcol, BIN<long long int, NT> &bin)
{
#ifdef VECTORIZE
    const __m256i init_m = _mm256_set1_epi64x(-1); // pattern of empty slots
    const __m256i true_m = _mm256_set1_epi64x(0xffffffffffffffff); // all-lanes-enabled mask for maskload
#endif
#pragma omp parallel
    {
        long long int tid = omp_get_thread_num();
        long long int start_row = bin.rows_offset[tid];
        long long int end_row = bin.rows_offset[tid + 1];
        long long int *check = bin.local_hash_table_id[tid]; // thread-local hash table; -1 marks an empty slot
        for (long long int i = start_row; i < end_row; ++i) {
#ifdef VECTORIZE
            __m256i key_m, check_m;
            __m256i mask_m;
            int mask;
#endif
            long long int nz = 0;
            long long int bid = bin.bin_id[i];
            if (bid > 0) {
                long long int table_size = MIN_HT_S << (bid - 1); // entries per table (power of two)
                long long int ht_size = table_size >> VEC_LENGTH_LONG_BIT; // number of chunks
                for (long long int j = 0; j < table_size; ++j) {
                    check[j] = -1; // initialize hash table
                }
                for (long long int j = arpt[i]; j < arpt[i + 1]; ++j) {
                    long long int t_acol = acol[j];
                    for (long long int k = brpt[t_acol]; k < brpt[t_acol + 1]; ++k) {
                        long long int key = bcol[k];
                        long long int hash = (key * HASH_SCAL) & (ht_size - 1); // chunk index
#ifdef VECTORIZE
                        key_m = _mm256_set1_epi64x(key);
#endif
                        while (1) { // loop for hash probing
                            // check whether the key is in this chunk
#ifdef VECTORIZE
                            check_m = _mm256_maskload_epi64(check + (hash << VEC_LENGTH_LONG_BIT), true_m);
                            mask_m = _mm256_cmpeq_epi64(key_m, check_m);
                            mask = _mm256_movemask_epi8(mask_m);
                            if (mask != 0) {
                                break;
                            }
#else
                            bool flag = false;
#pragma simd
                            for (int l = 0; l < VEC_LENGTH_LONG; ++l) {
                                if (check[(hash << VEC_LENGTH_LONG_BIT) + l] == key) {
                                    flag = true;
                                }
                            }
                            if (flag) {
                                break;
                            }
#endif
                            else {
                                // key not found: check whether the chunk is filled
                                long long int cur_nz;
#ifdef VECTORIZE
                                mask_m = _mm256_cmpeq_epi64(check_m, init_m);
                                mask = _mm256_movemask_epi8(mask_m);
                                // movemask yields 8 bits per 64-bit lane, hence >> 3:
                                // cur_nz = number of occupied entries in the chunk
                                cur_nz = (32 - _popcnt32(mask)) >> 3;
#else
                                cur_nz = VEC_LENGTH_LONG;
#pragma simd
                                for (int l = VEC_LENGTH_LONG - 1; l >= 0; --l) {
                                    if (check[(hash << VEC_LENGTH_LONG_BIT) + l] == -1) {
                                        cur_nz = l; // lowest empty index == occupied count
                                    }
                                }
#endif
                                if (cur_nz < VEC_LENGTH_LONG) { // not filled: insert here
                                    check[(hash << VEC_LENGTH_LONG_BIT) + cur_nz] = key;
                                    nz++;
                                    break;
                                }
                                else { // filled: linear probing to the next chunk
                                    hash = (hash + 1) & (ht_size - 1);
                                }
                            }
                        }
                    }
                }
            }
            bin.row_nz[i] = nz;
        }
    }
}
#endif
// Driver for the symbolic phase of Hash SpGEMM: counts the non-zeros of every
// row of C = A * B, builds C's row pointer array (crpt), and reports the total
// non-zero count through *nnz.
template <bool vectorProbing, class IT, class NT>
inline void hash_symbolic(const IT *arpt, const IT *acol, const IT *brpt, const IT *bcol, IT *crpt, BIN<IT, NT> &bin, const IT nrow, IT *nnz)
{
    // Count non-zeros per row with the requested probing strategy:
    // plain scalar probing, or chunked SIMD probing.
    if (!vectorProbing) {
        hash_symbolic_kernel(arpt, acol, brpt, bcol, bin);
    }
    else {
        hash_symbolic_vec_kernel(arpt, acol, brpt, bcol, bin);
    }
    // Prefix-sum the per-row counts into the row pointer of C; the last entry
    // of crpt then holds the total number of non-zeros.
    scan(bin.row_nz, crpt, nrow + 1);
    *nnz = crpt[nrow];
}
/*
 * Comparator handed to sort(): orders (column id, value) pairs by their
 * column id in ascending order, so each output row ends up column-sorted.
 */
template <typename IT, typename NT>
bool sort_less(const pair<IT, NT> &left,const pair<IT, NT> &right)
{
    const IT &lhs_key = left.first;
    const IT &rhs_key = right.first;
    return lhs_key < rhs_key;
}
/*
 * Compacts one row's hash table (ht_check/ht_value, -1 marks an empty slot)
 * into the output matrix arrays (colids/values), optionally sorting the nz
 * occupied entries by column id. Used by the hash_numeric* kernels, which
 * pass colids/values already offset to the row's first slot (rpt[rowid]).
 */
template <bool sortOutput, typename IT, typename NT>
inline void sort_and_store_table2mat(IT *ht_check, NT *ht_value, IT *colids, NT * values, IT nz, IT ht_size)
{
    if (sortOutput) {
        // Gather the occupied slots, order them by column id, then write out.
        std::vector<std::pair<IT, NT>> entries;
        entries.reserve(nz);
        for (IT slot = 0; slot < ht_size; ++slot) {
            if (ht_check[slot] != -1) {
                entries.emplace_back(ht_check[slot], ht_value[slot]);
            }
        }
        std::sort(entries.begin(), entries.end(),
                  [](const std::pair<IT, NT> &a, const std::pair<IT, NT> &b) { return a.first < b.first; });
        IT out = 0;
        for (const auto &entry : entries) {
            colids[out] = entry.first;
            values[out] = entry.second;
            ++out;
        }
    }
    else {
        // Unsorted: stream the occupied slots out in table order.
        IT out = 0;
        for (IT slot = 0; slot < ht_size; ++slot) {
            if (ht_check[slot] != -1) {
                colids[out] = ht_check[slot];
                values[out] = ht_value[slot];
                ++out;
            }
        }
    }
}
/*
 * Numeric phase in Hash SpGEMM.
 * Fills C's column indices (ccol) and values (cval) for C = A * B, using the
 * row pointer crpt produced by the symbolic phase. Products multop(aval, bval)
 * are accumulated per row into a thread-local hash table with addop, then
 * flushed (optionally column-sorted) via sort_and_store_table2mat.
 */
template <bool sortOutput, typename IT, typename NT, typename MultiplyOperation, typename AddOperation>
inline void hash_numeric(const IT *arpt, const IT *acol, const NT *aval, const IT *brpt, const IT *bcol, const NT *bval, const IT *crpt, IT *ccol, NT *cval, const BIN<IT, NT> &bin, const MultiplyOperation multop, const AddOperation addop)
{
#pragma omp parallel
    {
        IT tid = omp_get_thread_num();
        IT start_row = bin.rows_offset[tid];
        IT end_row = bin.rows_offset[tid + 1];
        IT *ht_check = bin.local_hash_table_id[tid]; // thread-local keys (column ids); -1 marks an empty slot
        NT *ht_value = bin.local_hash_table_val[tid]; // thread-local accumulated values
        for (IT i = start_row; i < end_row; ++i) {
            IT bid = bin.bin_id[i]; // bin id 0 means this row has no entries
            if (bid > 0) {
                IT offset = crpt[i]; // first output slot of row i in ccol/cval
                IT ht_size = MIN_HT_N << (bid - 1); // power-of-two table size for this row
                for (IT j = 0; j < ht_size; ++j) {
                    ht_check[j] = -1;
                }
                for (IT j = arpt[i]; j < arpt[i + 1]; ++j) {
                    IT t_acol = acol[j];
                    NT t_aval = aval[j];
                    for (IT k = brpt[t_acol]; k < brpt[t_acol + 1]; ++k) {
                        NT t_val = multop(t_aval, bval[k]);
                        IT key = bcol[k];
                        IT hash = (key * HASH_SCAL) & (ht_size - 1);
                        while (1) { // Loop for hash probing
                            if (ht_check[hash] == key) { // key is already inserted
                                ht_value[hash] = addop(t_val, ht_value[hash]);
                                break;
                            }
                            else if (ht_check[hash] == -1) { // insert new entry
                                ht_check[hash] = key;
                                ht_value[hash] = t_val;
                                break;
                            }
                            else {
                                hash = (hash + 1) & (ht_size - 1); // (hash + 1) % ht_size
                            }
                        }
                    }
                }
                sort_and_store_table2mat<sortOutput, IT, NT>(ht_check, ht_value,
                                                             ccol + offset, cval + offset,
                                                             crpt[i + 1] - offset, ht_size);
            }
        }
    }
}
#ifdef KNL_EXE
/*
* Numeric phase for Hash Vector SpGEMM
* This function is optimized for 32-bit integer with AVX-512.
*/
template <bool sortOutput, typename NT, typename MultiplyOperation, typename AddOperation>
inline void hash_numeric_vec(const int *arpt, const int *acol, const NT *aval, const int *brpt, const int *bcol, const NT *bval, const int *crpt, int *ccol, NT *cval, const BIN<int, NT> &bin, MultiplyOperation multop, AddOperation addop)
{
    // Numeric phase, 32-bit indices, AVX-512 chunked probing.
    // `hash` is a pre-shifted ELEMENT offset (chunk start).
#ifdef VECTORIZE
    const __m512i init_m = _mm512_set1_epi32(-1); // pattern of empty slots
#endif
#pragma omp parallel
    {
        int tid = omp_get_thread_num();
        int start_row = bin.rows_offset[tid];
        int end_row = bin.rows_offset[tid + 1];
        int *ht_check = bin.local_hash_table_id[tid]; // thread-local keys; -1 marks an empty slot
        NT *ht_value = bin.local_hash_table_val[tid]; // thread-local accumulated values
        for (int i = start_row; i < end_row; ++i) {
#ifdef VECTORIZE
            __m512i key_m, check_m;
            __mmask16 mask_m; // (unused k_m removed)
#endif
            int bid = bin.bin_id[i];
            if (bid > 0) {
                int offset = crpt[i]; // first output slot of row i
                int table_size = MIN_HT_N << (bid - 1); // the number of entries per table
                int ht_size = table_size >> VEC_LENGTH_BIT; // the number of chunks (1 chunk = VEC_LENGTH elements)
                for (int j = 0; j < table_size; ++j) {
                    ht_check[j] = -1; // initialize hash table
                }
                for (int j = arpt[i]; j < arpt[i + 1]; ++j) {
                    int t_acol = acol[j];
                    NT t_aval = aval[j];
                    for (int k = brpt[t_acol]; k < brpt[t_acol + 1]; ++k) {
                        NT t_val = multop(t_aval, bval[k]);
                        int key = bcol[k];
                        int hash = ((key * HASH_SCAL) & (ht_size - 1)) << VEC_LENGTH_BIT;
#ifdef VECTORIZE
                        key_m = _mm512_set1_epi32(key);
#endif
                        while (1) { // loop for hash probing
                            // check whether the key is in hash table.
#ifdef VECTORIZE
                            check_m = _mm512_load_epi32(ht_check + hash);
                            mask_m = _mm512_cmp_epi32_mask(key_m, check_m, _MM_CMPINT_EQ);
                            if (mask_m != 0) {
                                int target = __builtin_ctz(mask_m);
                                // BUGFIX: accumulate with the user-supplied addop
                                // (was a hard-coded +=, inconsistent with hash_numeric).
                                ht_value[hash + target] = addop(t_val, ht_value[hash + target]);
                                break;
                            }
#else
                            int flag = -1;
#pragma vector
                            for (int l = 0; l < VEC_LENGTH; ++l) {
                                if (ht_check[hash + l] == key) {
                                    flag = l;
                                }
                            }
                            if (flag >= 0) {
                                ht_value[hash + flag] = addop(t_val, ht_value[hash + flag]); // BUGFIX: use addop
                                break;
                            }
#endif
                            else {
                                // If the entry with same key cannot be found, check whether the chunk is filled or not
                                int cur_nz;
#ifdef VECTORIZE
                                mask_m = _mm512_cmp_epi32_mask(check_m, init_m, _MM_CMPINT_NE);
                                cur_nz = _popcnt32(mask_m); // number of occupied slots in the chunk
#else
                                cur_nz = VEC_LENGTH;
#pragma vector
                                for (int l = 0; l < VEC_LENGTH; ++l) {
                                    if (ht_check[hash + l] == -1) {
                                        cur_nz = l;
                                        break;
                                    }
                                }
#endif
                                if (cur_nz < VEC_LENGTH) { // if it is not filled, push the entry to the table
                                    ht_check[hash + cur_nz] = key;
                                    ht_value[hash + cur_nz] = t_val;
                                    break;
                                }
                                else { // if it is filled, check next chunk (linear probing)
                                    hash = (hash + VEC_LENGTH) & (table_size - 1);
                                }
                            }
                        }
                    }
                }
                // BUGFIX: the flush must scan all table_size ENTRIES; passing ht_size
                // (the chunk count) scanned only 1/VEC_LENGTH of the table.
                sort_and_store_table2mat<sortOutput, int, NT>(ht_check, ht_value,
                                                              ccol + offset, cval + offset,
                                                              crpt[i + 1] - offset, table_size);
            }
        }
    }
}
/*
* Numeric phase for Hash Vector SpGEMM
* This function is optimized for 64-bit integer with AVX-512.
*/
template <bool sortOutput, typename NT, typename MultiplyOperation, typename AddOperation>
inline void hash_numeric_vec(const long long int *arpt, const long long int *acol, const NT *aval, const long long int *brpt, const long long int *bcol, const NT *bval, const long long int *crpt, long long int *ccol, NT *cval, const BIN<long long int, NT> &bin, MultiplyOperation multop, AddOperation addop)
{
    // Numeric phase, 64-bit indices, AVX-512 chunked probing.
    // `hash` is a pre-shifted ELEMENT offset (chunk start).
#ifdef VECTORIZE
    const __m512i init_m = _mm512_set1_epi64(-1); // pattern of empty slots
#endif
#pragma omp parallel
    {
        long long int tid = omp_get_thread_num();
        long long int start_row = bin.rows_offset[tid];
        long long int end_row = bin.rows_offset[tid + 1];
        long long int *ht_check = bin.local_hash_table_id[tid]; // thread-local keys; -1 marks an empty slot
        NT *ht_value = bin.local_hash_table_val[tid]; // thread-local accumulated values
        for (long long int i = start_row; i < end_row; ++i) {
#ifdef VECTORIZE
            __m512i key_m, check_m;
            __mmask8 mask_m; // (unused k_m removed)
#endif
            long long int bid = bin.bin_id[i];
            if (bid > 0) {
                long long int offset = crpt[i]; // first output slot of row i
                long long int table_size = MIN_HT_N << (bid - 1); // entries per table
                long long int ht_size = table_size >> VEC_LENGTH_LONG_BIT; // number of chunks
                for (long long int j = 0; j < table_size; ++j) {
                    ht_check[j] = -1; // initialize hash table
                }
                for (long long int j = arpt[i]; j < arpt[i + 1]; ++j) {
                    long long int t_acol = acol[j];
                    NT t_aval = aval[j];
                    for (long long int k = brpt[t_acol]; k < brpt[t_acol + 1]; ++k) {
                        NT t_val = multop(t_aval, bval[k]);
                        long long int key = bcol[k];
                        long long int hash = ((key * HASH_SCAL) & (ht_size - 1)) << VEC_LENGTH_LONG_BIT;
#ifdef VECTORIZE
                        key_m = _mm512_set1_epi64(key);
#endif
                        while (1) { // loop for hash probing
#ifdef VECTORIZE
                            check_m = _mm512_load_epi64(ht_check + hash);
                            mask_m = _mm512_cmp_epi64_mask(key_m, check_m, _MM_CMPINT_EQ);
                            if (mask_m != 0) {
                                long long int target = __builtin_ctz(mask_m);
                                // BUGFIX: accumulate with the user-supplied addop
                                // (was a hard-coded +=, inconsistent with hash_numeric).
                                ht_value[hash + target] = addop(t_val, ht_value[hash + target]);
                                break;
                            }
#else
                            long long int flag = -1;
#pragma vector
                            for (int l = 0; l < VEC_LENGTH_LONG; ++l) {
                                if (ht_check[hash + l] == key) {
                                    flag = l;
                                }
                            }
                            if (flag >= 0) {
                                ht_value[hash + flag] = addop(t_val, ht_value[hash + flag]); // BUGFIX: use addop
                                break;
                            }
#endif
                            else {
                                long long int cur_nz;
#ifdef VECTORIZE
                                mask_m = _mm512_cmp_epi64_mask(check_m, init_m, _MM_CMPINT_NE);
                                cur_nz = _popcnt32(mask_m); // number of occupied slots in the chunk
#else
                                cur_nz = VEC_LENGTH_LONG;
#pragma vector
                                // BUGFIX: loop index was declared with undefined type IT
                                // (this function has no IT template parameter).
                                for (int l = 0; l < VEC_LENGTH_LONG; ++l) {
                                    if (ht_check[hash + l] == -1) {
                                        cur_nz = l;
                                        break;
                                    }
                                }
#endif
                                if (cur_nz < VEC_LENGTH_LONG) { // not filled: insert here
                                    ht_check[hash + cur_nz] = key;
                                    ht_value[hash + cur_nz] = t_val;
                                    break;
                                }
                                else { // filled: linear probing to the next chunk
                                    hash = (hash + VEC_LENGTH_LONG) & (table_size - 1);
                                }
                            }
                        }
                    }
                }
                // BUGFIX: flush must scan all table_size ENTRIES, not the chunk count.
                sort_and_store_table2mat<sortOutput, long long int, NT>(ht_check, ht_value,
                                                                        ccol + offset, cval + offset,
                                                                        crpt[i + 1] - offset, table_size);
            }
        }
    }
}
#else
template <bool sortOutput, typename NT, typename MultiplyOperation, typename AddOperation>
inline void hash_numeric_vec(const int *arpt, const int *acol, const NT *aval, const int *brpt, const int *bcol, const NT *bval, const int *crpt, int *ccol, NT *cval, const BIN<int, NT> &bin, MultiplyOperation multop, AddOperation addop)
{
    // Numeric phase, 32-bit indices, AVX2 chunked probing.
    // `hash` is a CHUNK index, shifted by VEC_LENGTH_BIT for element accesses.
#ifdef VECTORIZE
    const __m256i init_m = _mm256_set1_epi32(-1); // pattern of empty slots
    const __m256i true_m = _mm256_set1_epi32(0xffffffff); // all-lanes-enabled mask for maskload
#endif
#pragma omp parallel
    {
        int tid = omp_get_thread_num();
        int start_row = bin.rows_offset[tid];
        int end_row = bin.rows_offset[tid + 1];
        int *ht_check = bin.local_hash_table_id[tid]; // thread-local keys; -1 marks an empty slot
        NT *ht_value = bin.local_hash_table_val[tid]; // thread-local accumulated values
        for (int i = start_row; i < end_row; ++i) {
#ifdef VECTORIZE
            __m256i key_m, check_m, mask_m;
            int mask;
#endif
            int bid = bin.bin_id[i];
            if (bid > 0) {
                int offset = crpt[i]; // first output slot of row i
                int table_size = MIN_HT_N << (bid - 1); // entries per table
                int ht_size = table_size >> VEC_LENGTH_BIT; // number of chunks
                for (int j = 0; j < table_size; ++j) {
                    ht_check[j] = -1; // initialize hash table
                }
                for (int j = arpt[i]; j < arpt[i + 1]; ++j) {
                    int t_acol = acol[j];
                    NT t_aval = aval[j];
                    for (int k = brpt[t_acol]; k < brpt[t_acol + 1]; ++k) {
                        NT t_val = multop(t_aval, bval[k]);
                        int key = bcol[k];
                        int hash = (key * HASH_SCAL) & (ht_size - 1); // chunk index
#ifdef VECTORIZE
                        key_m = _mm256_set1_epi32(key);
#endif
                        while (1) {
#ifdef VECTORIZE
                            check_m = _mm256_maskload_epi32(ht_check + (hash << VEC_LENGTH_BIT), true_m);
                            mask_m = _mm256_cmpeq_epi32(key_m, check_m);
                            mask = _mm256_movemask_epi8(mask_m);
                            if (mask != 0) {
                                int target = __builtin_ctz(mask) >> 2; // 4 mask bits per 32-bit lane
                                // BUGFIX: accumulate with the user-supplied addop
                                // (was a hard-coded +=, inconsistent with hash_numeric).
                                ht_value[(hash << VEC_LENGTH_BIT) + target] = addop(t_val, ht_value[(hash << VEC_LENGTH_BIT) + target]);
                                break;
                            }
#else
                            int flag = -1;
                            for (int l = 0; l < VEC_LENGTH; ++l) {
                                if (ht_check[(hash << VEC_LENGTH_BIT) + l] == key) {
                                    flag = l;
                                }
                            }
                            if (flag >= 0) {
                                ht_value[(hash << VEC_LENGTH_BIT) + flag] = addop(t_val, ht_value[(hash << VEC_LENGTH_BIT) + flag]); // BUGFIX: use addop
                                break;
                            }
#endif
                            else {
                                int cur_nz;
#ifdef VECTORIZE
                                mask_m = _mm256_cmpeq_epi32(check_m, init_m);
                                mask = _mm256_movemask_epi8(mask_m);
                                cur_nz = (32 - _popcnt32(mask)) >> 2; // occupied entries in the chunk
#else
                                cur_nz = VEC_LENGTH;
                                for (int l = 0; l < VEC_LENGTH; ++l) {
                                    if (ht_check[(hash << VEC_LENGTH_BIT) + l] == -1) {
                                        cur_nz = l;
                                        break;
                                    }
                                }
#endif
                                if (cur_nz < VEC_LENGTH) { // not filled: insert here
                                    ht_check[(hash << VEC_LENGTH_BIT) + cur_nz] = key;
                                    ht_value[(hash << VEC_LENGTH_BIT) + cur_nz] = t_val;
                                    break;
                                }
                                else { // filled: linear probing to the next chunk
                                    hash = (hash + 1) & (ht_size - 1);
                                }
                            }
                        }
                    }
                }
                // BUGFIX: flush must scan all table_size ENTRIES; passing ht_size
                // (the chunk count) scanned only 1/VEC_LENGTH of the table.
                sort_and_store_table2mat<sortOutput, int, NT>(ht_check, ht_value,
                                                              ccol + offset, cval + offset,
                                                              crpt[i + 1] - offset, table_size);
            }
        }
    }
}
template <bool sortOutput, typename NT, typename MultiplyOperation, typename AddOperation>
inline void hash_numeric_vec(const long long int *arpt, const long long int *acol, const NT *aval, const long long int *brpt, const long long int *bcol, const NT *bval, const long long int *crpt, long long int *ccol, NT *cval, const BIN<long long int, NT> &bin, MultiplyOperation multop, AddOperation addop)
{
    // Numeric phase, 64-bit indices, AVX2 chunked probing.
    // `hash` is a CHUNK index, shifted by VEC_LENGTH_LONG_BIT for element accesses.
#ifdef VECTORIZE
    const __m256i init_m = _mm256_set1_epi64x(-1); // pattern of empty slots
    const __m256i true_m = _mm256_set1_epi64x(0xffffffffffffffff); // all-lanes-enabled mask for maskload
#endif
#pragma omp parallel
    {
        long long int tid = omp_get_thread_num();
        long long int start_row = bin.rows_offset[tid];
        long long int end_row = bin.rows_offset[tid + 1];
        long long int *ht_check = bin.local_hash_table_id[tid]; // thread-local keys; -1 marks an empty slot
        NT *ht_value = bin.local_hash_table_val[tid]; // thread-local accumulated values
        for (long long int i = start_row; i < end_row; ++i) {
#ifdef VECTORIZE
            __m256i key_m, check_m, mask_m;
            int mask;
#endif
            long long int bid = bin.bin_id[i];
            if (bid > 0) {
                long long int offset = crpt[i]; // first output slot of row i
                long long int table_size = MIN_HT_N << (bid - 1); // entries per table
                long long int ht_size = table_size >> VEC_LENGTH_LONG_BIT; // number of chunks
                for (long long int j = 0; j < table_size; ++j) {
                    ht_check[j] = -1; // initialize hash table
                }
                for (long long int j = arpt[i]; j < arpt[i + 1]; ++j) {
                    long long int t_acol = acol[j];
                    NT t_aval = aval[j];
                    for (long long int k = brpt[t_acol]; k < brpt[t_acol + 1]; ++k) {
                        NT t_val = multop(t_aval, bval[k]);
                        long long int key = bcol[k];
                        long long int hash = (key * HASH_SCAL) & (ht_size - 1); // chunk index
#ifdef VECTORIZE
                        key_m = _mm256_set1_epi64x(key);
#endif
                        while (1) {
#ifdef VECTORIZE
                            check_m = _mm256_maskload_epi64(ht_check + (hash << VEC_LENGTH_LONG_BIT), true_m);
                            mask_m = _mm256_cmpeq_epi64(key_m, check_m);
                            mask = _mm256_movemask_epi8(mask_m);
                            if (mask != 0) {
                                int target = __builtin_ctz(mask) >> 3; // 8 mask bits per 64-bit lane
                                // BUGFIX: accumulate with the user-supplied addop
                                // (was a hard-coded +=, inconsistent with hash_numeric).
                                ht_value[(hash << VEC_LENGTH_LONG_BIT) + target] = addop(t_val, ht_value[(hash << VEC_LENGTH_LONG_BIT) + target]);
                                break;
                            }
#else
                            int flag = -1;
                            for (int l = 0; l < VEC_LENGTH_LONG; ++l) {
                                if (ht_check[(hash << VEC_LENGTH_LONG_BIT) + l] == key) {
                                    flag = l;
                                }
                            }
                            if (flag >= 0) {
                                ht_value[(hash << VEC_LENGTH_LONG_BIT) + flag] = addop(t_val, ht_value[(hash << VEC_LENGTH_LONG_BIT) + flag]); // BUGFIX: use addop
                                break;
                            }
#endif
                            else {
                                int cur_nz;
#ifdef VECTORIZE
                                mask_m = _mm256_cmpeq_epi64(check_m, init_m);
                                mask = _mm256_movemask_epi8(mask_m);
                                cur_nz = (32 - _popcnt32(mask)) >> 3; // occupied entries in the chunk
#else
                                cur_nz = VEC_LENGTH_LONG;
                                for (int l = 0; l < VEC_LENGTH_LONG; ++l) {
                                    if (ht_check[(hash << VEC_LENGTH_LONG_BIT) + l] == -1) {
                                        cur_nz = l;
                                        break;
                                    }
                                }
#endif
                                if (cur_nz < VEC_LENGTH_LONG) { // not filled: insert here
                                    ht_check[(hash << VEC_LENGTH_LONG_BIT) + cur_nz] = key;
                                    ht_value[(hash << VEC_LENGTH_LONG_BIT) + cur_nz] = t_val;
                                    break;
                                }
                                else { // filled: linear probing to the next chunk
                                    hash = (hash + 1) & (ht_size - 1);
                                }
                            }
                        }
                    }
                }
                // BUGFIX: flush must scan all table_size ENTRIES, not the chunk count.
                sort_and_store_table2mat<sortOutput, long long int, NT>(ht_check, ht_value,
                                                                        ccol + offset, cval + offset,
                                                                        crpt[i + 1] - offset, table_size);
            }
        }
    }
}
#endif
/*
 * Executing Hash SpGEMM: computes C = A * B (CSR) with multop/addop as the
 * multiply/accumulate operations.
 * The function starts with initialization of the hash-table manager (BIN),
 * followed by the symbolic phase (sizes and allocates C) and the numeric
 * phase (fills C's column indices and values) with the thread-local hash tables.
 * vectorProbing selects the SIMD chunk-probing kernels; sortOutput requests
 * each output row's columns in ascending order.
 */
template <bool vectorProbing, bool sortOutput, typename IT, typename NT, typename MultiplyOperation, typename AddOperation>
void HashSpGEMM(const CSR<IT, NT> &a, const CSR<IT, NT> &b, CSR<IT, NT> &c, MultiplyOperation multop, AddOperation addop)
{
    BIN<IT, NT> bin(a.rows, MIN_HT_S); // per-thread row partitioning + hash tables
    c.rows = a.rows;
    c.cols = b.cols;
    c.zerobased = true;
    /* Set max bin */
    bin.set_max_bin(a.rowptr, a.colids, b.rowptr, c.rows, c.cols);
    /* Create hash table (thread local) */
    bin.create_local_hash_table(c.cols);
    /* Symbolic Phase: count nnz per row, build c.rowptr, and set c.nnz */
    c.rowptr = my_malloc<IT>(c.rows + 1);
    hash_symbolic<vectorProbing>(a.rowptr, a.colids, b.rowptr, b.colids, c.rowptr, bin, c.rows, &(c.nnz));
    c.colids = my_malloc<IT>(c.nnz);
    c.values = my_malloc<NT>(c.nnz);
    /* Numeric Phase: compute the actual entries of C */
    if (vectorProbing) {
        hash_numeric_vec<sortOutput>(a.rowptr, a.colids, a.values, b.rowptr, b.colids, b.values, c.rowptr, c.colids, c.values, bin, multop, addop);
    }
    else {
        hash_numeric<sortOutput>(a.rowptr, a.colids, a.values, b.rowptr, b.colids, b.values, c.rowptr, c.colids, c.values, bin, multop, addop);
    }
}
/*
 * Convenience overload: Hash SpGEMM with scalar (non-vector) hash probing;
 * only the sortOutput policy needs to be supplied explicitly.
 */
template <bool sortOutput, typename IT, typename NT, typename MultiplyOperation, typename AddOperation>
void HashSpGEMM(const CSR<IT, NT> &a, const CSR<IT, NT> &b, CSR<IT, NT> &c, MultiplyOperation multop, AddOperation addop)
{
    // vectorProbing = false; the remaining template arguments are deduced.
    HashSpGEMM<false, sortOutput>(a, b, c, multop, addop);
}
// Default overload: scalar probing with column-sorted output rows.
template <typename IT, typename NT, typename MultiplyOperation, typename AddOperation>
void HashSpGEMM(const CSR<IT, NT> &a, const CSR<IT, NT> &b, CSR<IT, NT> &c, MultiplyOperation multop, AddOperation addop)
{
    // vectorProbing = false, sortOutput = true; other arguments are deduced.
    HashSpGEMM<false, true>(a, b, c, multop, addop);
}
|
GB_unop__bnot_uint16_uint16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__bnot_uint16_uint16)
// op(A') function: GB (_unop_tran__bnot_uint16_uint16)
// C type: uint16_t
// A type: uint16_t
// cast: uint16_t cij = aij
// unaryop: cij = ~(aij)
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = ~(x) ;
// casting
#define GB_CAST(z, aij) \
uint16_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint16_t z = aij ; \
Cx [pC] = ~(z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BNOT || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Entrywise apply: Cx [p] = ~(Ax [p]) for all anz entries (uint16 bitwise NOT).
// NOTE: this file is auto-generated (see header); code must stay in sync with
// the Generator/ template, so only comments are added here.
GrB_Info GB (_unop_apply__bnot_uint16_uint16)
(
    uint16_t *Cx,               // Cx and Ax may be aliased
    const uint16_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is not bitmap: all anz entries are present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint16_t aij = Ax [p] ;
            uint16_t z = aij ;
            Cx [p] = ~(z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;   // skip positions not present in the bitmap
            uint16_t aij = Ax [p] ;
            uint16_t z = aij ;
            Cx [p] = ~(z) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): the transpose loop itself is textually included from
// GB_unop_transpose.c, which applies the GB_CAST_OP macro defined above.
// NOTE: auto-generated file; only comments are added here.
GrB_Info GB (_unop_tran__bnot_uint16_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}
#endif
|
fc_hcl_x86.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: qtang@openailab.com
*/
#include "fc_param.h"
#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "module/module.h"
#include "operator/op.h"
#include "utility/sys_port.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"
#include <math.h>
#include <string.h>
#if __SSE2__
#include <emmintrin.h>
#endif
#if __AVX__
#include <immintrin.h>
#endif
/* Per-node scratch data for the fully-connected (innerproduct) CPU op.
 * batch, out_number and hidden are filled in by prerun() from the input and
 * weight tensor shapes. */
struct fc_data
{
    int need_trans;  // nonzero if the weight layout needs transposing — set outside this chunk; TODO confirm with run()
    int batch;       // N (input_tensor->dims[0])
    int out_number;  // OUT (fc_param->num_output)
    int hidden;      // hidden (flattened per-sample input length)
    int zero[3];     // input, kernel, output — zero points; presumably for a quantized path, confirm
    float scale[3];  // input, kernel, output — scales; presumably for a quantized path, confirm
};
static int innerproduct(int inn, int inc, int inh, int inw, int outc, const float* weight, const float* input, float* output,
const float* _bias, int num_thread, int cpu_affinity)
{
size_t elemsize = sizeof(float);
int size = inw * inh;
for (int n = 0; n < inn; n++)
{
#pragma omp parallel for num_threads(num_thread)
for (int p = 0; p < outc; p++)
{
int q = 0;
float sum = _bias ? _bias[p] : 0.f;
const float* weight1 = weight + p * inc * size;
const float* input1 = input + n * inc * size;
#if __AVX__ || __SSE__
#if __SSE__
float _sum[4] = {0.f};
__m128 _sum0 = _mm_set1_ps(0.f);
for (; q + 3 < inc * size; q = q + 4)
{
__m128 _input = _mm_loadu_ps(input1 + q);
__m128 _weight = _mm_loadu_ps(weight1 + q);
__m128 _sum1 = _mm_mul_ps(_input, _weight);
_sum0 = _mm_add_ps(_sum0, _sum1);
}
_mm_storeu_ps(_sum, _sum0);
float tmp = _sum[0] + _sum[1] + _sum[2] + _sum[3];
sum = sum + tmp;
#else //__AVX__
// TODO
#endif
#endif
for (; q < inc * size; q++)
{
float tmp = input1[q] * weight1[q];
sum = sum + tmp;
}
output[n * outc + p] = sum;
}
}
return 0;
}
/* Allocate and zero the per-node fc_data private block.
 * @return 0 on success, -1 if the allocation failed. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct fc_data* op_param = (struct fc_data*)sys_malloc(sizeof(struct fc_data));
    if (op_param == NULL)
        return -1; /* propagate OOM instead of crashing in memset below */
    memset(op_param, 0, sizeof(struct fc_data));
    exec_node->ops_priv = op_param;
    return 0;
}
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
sys_free(exec_node->ops_priv);
return 0;
}
/* Cache the FC geometry (batch, hidden length, output count) into the node's
 * fc_data block before execution, and record whether the weight matrix would
 * need a transpose (leading weight dim != num_output). */
static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* ir_graph = ir_node->graph;

    struct tensor* in_t = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    struct tensor* w_t = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[1]);
    struct tensor* out_t = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);
    (void)out_t; /* fetched for parity with the other callbacks; not needed here */

    struct fc_param* param = (struct fc_param*)ir_node->op.param_mem;
    struct fc_data* priv = (struct fc_data*)exec_node->ops_priv;

    /* flattened per-sample input length (product of all non-batch dims) */
    int hidden;
    if (ir_graph->graph_layout == TENGINE_LAYOUT_NCHW)
    {
        hidden = in_t->dims[1];
        for (int i = 2; i < in_t->dim_num && i < 4; i++)
            hidden *= in_t->dims[i];
    }
    else
    {
        hidden = 0;
        if (in_t->dim_num == 2)
            hidden = in_t->dims[1];
        else if (in_t->dim_num == 3)
            hidden = in_t->dims[1] * in_t->dims[2];
        else if (in_t->dim_num == 4)
            hidden = in_t->dims[1] * in_t->dims[2] * in_t->dims[3];
    }
    priv->hidden = hidden;

    priv->batch = in_t->dims[0];
    priv->out_number = param->num_output;

    /* weight already stored (out_number, hidden) -> no transpose required */
    priv->need_trans = (w_t->dims[0] != priv->out_number) ? 1 : 0;
    return 0;
}
/* Execute the FP32 fully-connected node: gather tensor buffers and geometry,
 * then delegate the math to innerproduct(). Bias is optional (third input). */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* ir_graph = ir_node->graph;

    struct tensor* in_t = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    struct tensor* w_t = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[1]);
    struct tensor* out_t = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    /* kept for symmetry with prerun(); the FP32 kernel reads neither */
    struct fc_param* param = (struct fc_param*)ir_node->op.param_mem;
    struct fc_data* priv = (struct fc_data*)exec_node->ops_priv;
    (void)param;
    (void)priv;

    int batch = in_t->dims[0];
    int channels = in_t->dims[1];
    /* collapsed tensors leave trailing dims at 0 -> treat as size 1
     * (assumes dims[] beyond dim_num are zeroed — TODO confirm) */
    int height = in_t->dims[2] ? in_t->dims[2] : 1;
    int width = in_t->dims[3] ? in_t->dims[3] : 1;
    int out_channels = out_t->dims[1];

    const float* bias = NULL;
    if (ir_node->input_num > 2)
    {
        struct tensor* b_t = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[2]);
        bias = (const float*)b_t->data;
    }

    if (innerproduct(batch, channels, height, width, out_channels, (const float*)w_t->data,
                     (const float*)in_t->data, (float*)out_t->data, bias,
                     exec_graph->num_thread, exec_graph->cpu_affinity) < 0)
        return -1;
    return 0;
}
/* Infer the FC output shape from the input shape and the weight matrix.
 * The output keeps the input rank; the num_output axis lands on the channel
 * axis for NCHW and on the innermost axis for NHWC, with the remaining
 * spatial dims collapsed to 1. Fails when the flattened input length does
 * not match the weight's hidden dimension. */
static int reshape(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* ir_graph = ir_node->graph;
    struct tensor* in_t = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    struct tensor* w_t = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[1]);
    struct tensor* out_t = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    const int out_num = w_t->dims[0]; /* n: output neurons          */
    const int hidden = w_t->dims[1];  /* k: expected input length   */
    const int batch = in_t->dims[0];  /* m: batch size              */
    const int nhwc = (ir_graph->graph_layout == TENGINE_LAYOUT_NHWC);

    int flat = in_t->dims[1]; /* flattened non-batch input length */
    int dim[4] = {batch, out_num, 1, 1}; /* NCHW default: (m, n[, 1[, 1]]) */

    if (in_t->dim_num == 3)
    {
        if (in_t->dims[2] != 0)
            flat *= in_t->dims[2];
        if (nhwc)
        {
            dim[1] = 1;
            dim[2] = out_num; /* (m, 1, n) */
        }
    }
    else if (in_t->dim_num == 4)
    {
        if (in_t->dims[2] * in_t->dims[3] != 0)
            flat *= in_t->dims[2] * in_t->dims[3];
        if (nhwc)
        {
            dim[1] = 1;
            dim[3] = out_num; /* (m, 1, 1, n) */
        }
    }
    else if (in_t->dim_num != 2)
        return -1; /* unsupported rank */

    if (hidden != flat)
    {
        TLOG_ERR("fc: input tensor and weight tensor shape does not match, hidden_number: %d\n", hidden);
        return -1;
    }
    return set_ir_tensor_shape(out_t, dim, in_t->dim_num);
}
/* Report how well this implementation fits the node: best score for FP32
 * input, 0 (never selected) otherwise. */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
    struct tensor* in_t = get_ir_graph_tensor(exec_node->graph, exec_node->input_tensors[0]);
    /* todo support uint8 */
    return (in_t->data_type == TENGINE_DT_FP32) ? OPS_SCORE_BEST : 0;
}
/* Callback table registered for OP_FC on x86; postrun is NULL because the
 * kernel allocates nothing per execution that needs tearing down. */
static struct node_ops hcl_node_ops = {.prerun = prerun,
                                       .run = run,
                                       .reshape = reshape,
                                       .postrun = NULL,
                                       .init_node = init_node,
                                       .release_node = release_node,
                                       .score = score};
int register_fc_hcl_x86_op()
{
return register_builtin_node_ops(OP_FC, &hcl_node_ops);
}
int unregister_fc_hcl_x86_op()
{
return unregister_builtin_node_ops(OP_FC, &hcl_node_ops);
}
|
convolution_3x3.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON
/* 3x3 stride-1 convolution, NEON-accelerated.
 *
 * Layout of the work:
 *  - Output channels are processed two at a time (the paired loop below),
 *    with a scalar tail loop for the leftover channel when outch is odd.
 *  - Within a channel pair, output rows are also processed two at a time,
 *    again with a tail loop for an odd last row.
 *  - Each row pass consumes 4 output pixels per NEON iteration
 *    (nn = outw >> 2) and finishes the remaining 0-3 pixels scalar-style.
 *
 * kernel layout: _kernel holds outch * inch * 9 floats, one 3x3 filter per
 * (output, input) channel pair. _bias may be empty; bias==NULL then means 0.
 *
 * NOTE(review): outw is assumed <= w - 2 (valid padding done by the caller)
 * so the 4-wide loads r0..r3 + 4 stay in bounds — TODO confirm at call sites.
 *
 * The AArch32 path uses hand-scheduled inline asm; do not reorder it. */
static void conv3x3s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;
    const float* kernel = _kernel;
    const float* bias = _bias;
    // pair up output channels; remain_outch_start marks the unpaired tail
    int nn_outch = outch >> 1;
    int remain_outch_start = nn_outch << 1;
#pragma omp parallel for
    for (int pp=0; pp<nn_outch; pp++)
    {
        int p = pp * 2;
        Mat out0 = top_blob.channel(p);
        Mat out1 = top_blob.channel(p+1);
        const float bias0 = bias ? bias[p] : 0.f;
        const float bias1 = bias ? bias[p+1] : 0.f;
        // seed the accumulation with the bias; the loops below add into it
        out0.fill(bias0);
        out1.fill(bias1);
        const float* k0 = kernel + p*inch*9;
        const float* k1 = kernel + (p+1)*inch*9;
        // accumulate over input channels
        for (int q=0; q<inch; q++)
        {
            float* outptr0 = out0;
            float* outptr1 = out1;
            float* outptr0n = outptr0 + outw;  // next output row (row pairing)
            float* outptr1n = outptr1 + outw;
            const float* img0 = bottom_blob.channel(q);
            const float* r0 = img0;
            const float* r1 = img0 + w;
            const float* r2 = img0 + w*2;
            const float* r3 = img0 + w*3;
#if __ARM_NEON
            // each _kXY vector holds one 3-tap kernel row (4th lane unused)
            float32x4_t _k00 = vld1q_f32(k0);
            float32x4_t _k03 = vld1q_f32(k0+3);
            float32x4_t _k06 = vld1q_f32(k0+6);
            float32x4_t _k10 = vld1q_f32(k1);
            float32x4_t _k13 = vld1q_f32(k1+3);
            float32x4_t _k16 = vld1q_f32(k1+6);
#endif // __ARM_NEON
            int i = 0;
            // two output rows per iteration
            for (; i+1 < outh; i+=2)
            {
#if __ARM_NEON
                int nn = outw >> 2;     // 4-pixel NEON iterations
                int remain = outw & 3;  // scalar tail pixels
#else
                int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
                for (; nn>0; nn--)
                {
                    float32x4_t _sum0 = vld1q_f32(outptr0);
                    float32x4_t _sum1 = vld1q_f32(outptr1);
                    float32x4_t _sum0n = vld1q_f32(outptr0n);
                    float32x4_t _sum1n = vld1q_f32(outptr1n);
                    // sliding 3-wide windows built from two loads + vext
                    float32x4_t _r00 = vld1q_f32(r0);
                    float32x4_t _r00n = vld1q_f32(r0 + 4);
                    float32x4_t _r01 = vextq_f32(_r00, _r00n, 1);
                    float32x4_t _r02 = vextq_f32(_r00, _r00n, 2);
                    float32x4_t _r10 = vld1q_f32(r1);
                    float32x4_t _r10n = vld1q_f32(r1 + 4);
                    float32x4_t _r11 = vextq_f32(_r10, _r10n, 1);
                    float32x4_t _r12 = vextq_f32(_r10, _r10n, 2);
                    float32x4_t _r20 = vld1q_f32(r2);
                    float32x4_t _r20n = vld1q_f32(r2 + 4);
                    float32x4_t _r21 = vextq_f32(_r20, _r20n, 1);
                    float32x4_t _r22 = vextq_f32(_r20, _r20n, 2);
                    float32x4_t _r30 = vld1q_f32(r3);
                    float32x4_t _r30n = vld1q_f32(r3 + 4);
                    float32x4_t _r31 = vextq_f32(_r30, _r30n, 1);
                    float32x4_t _r32 = vextq_f32(_r30, _r30n, 2);
                    // row i, channel p: rows r0..r2 against k0
                    _sum0 = vfmaq_laneq_f32(_sum0, _r00, _k00, 0);
                    _sum0 = vfmaq_laneq_f32(_sum0, _r01, _k00, 1);
                    _sum0 = vfmaq_laneq_f32(_sum0, _r02, _k00, 2);
                    _sum0 = vfmaq_laneq_f32(_sum0, _r10, _k03, 0);
                    _sum0 = vfmaq_laneq_f32(_sum0, _r11, _k03, 1);
                    _sum0 = vfmaq_laneq_f32(_sum0, _r12, _k03, 2);
                    _sum0 = vfmaq_laneq_f32(_sum0, _r20, _k06, 0);
                    _sum0 = vfmaq_laneq_f32(_sum0, _r21, _k06, 1);
                    _sum0 = vfmaq_laneq_f32(_sum0, _r22, _k06, 2);
                    // row i, channel p+1: same input rows against k1
                    _sum1 = vfmaq_laneq_f32(_sum1, _r00, _k10, 0);
                    _sum1 = vfmaq_laneq_f32(_sum1, _r01, _k10, 1);
                    _sum1 = vfmaq_laneq_f32(_sum1, _r02, _k10, 2);
                    _sum1 = vfmaq_laneq_f32(_sum1, _r10, _k13, 0);
                    _sum1 = vfmaq_laneq_f32(_sum1, _r11, _k13, 1);
                    _sum1 = vfmaq_laneq_f32(_sum1, _r12, _k13, 2);
                    _sum1 = vfmaq_laneq_f32(_sum1, _r20, _k16, 0);
                    _sum1 = vfmaq_laneq_f32(_sum1, _r21, _k16, 1);
                    _sum1 = vfmaq_laneq_f32(_sum1, _r22, _k16, 2);
                    // row i+1, channel p: rows r1..r3 against k0
                    _sum0n = vfmaq_laneq_f32(_sum0n, _r10, _k00, 0);
                    _sum0n = vfmaq_laneq_f32(_sum0n, _r11, _k00, 1);
                    _sum0n = vfmaq_laneq_f32(_sum0n, _r12, _k00, 2);
                    _sum0n = vfmaq_laneq_f32(_sum0n, _r20, _k03, 0);
                    _sum0n = vfmaq_laneq_f32(_sum0n, _r21, _k03, 1);
                    _sum0n = vfmaq_laneq_f32(_sum0n, _r22, _k03, 2);
                    _sum0n = vfmaq_laneq_f32(_sum0n, _r30, _k06, 0);
                    _sum0n = vfmaq_laneq_f32(_sum0n, _r31, _k06, 1);
                    _sum0n = vfmaq_laneq_f32(_sum0n, _r32, _k06, 2);
                    // row i+1, channel p+1
                    _sum1n = vfmaq_laneq_f32(_sum1n, _r10, _k10, 0);
                    _sum1n = vfmaq_laneq_f32(_sum1n, _r11, _k10, 1);
                    _sum1n = vfmaq_laneq_f32(_sum1n, _r12, _k10, 2);
                    _sum1n = vfmaq_laneq_f32(_sum1n, _r20, _k13, 0);
                    _sum1n = vfmaq_laneq_f32(_sum1n, _r21, _k13, 1);
                    _sum1n = vfmaq_laneq_f32(_sum1n, _r22, _k13, 2);
                    _sum1n = vfmaq_laneq_f32(_sum1n, _r30, _k16, 0);
                    _sum1n = vfmaq_laneq_f32(_sum1n, _r31, _k16, 1);
                    _sum1n = vfmaq_laneq_f32(_sum1n, _r32, _k16, 2);
                    vst1q_f32(outptr0, _sum0);
                    vst1q_f32(outptr1, _sum1);
                    vst1q_f32(outptr0n, _sum0n);
                    vst1q_f32(outptr1n, _sum1n);
                    r0 += 4;
                    r1 += 4;
                    r2 += 4;
                    r3 += 4;
                    outptr0 += 4;
                    outptr1 += 4;
                    outptr0n += 4;
                    outptr1n += 4;
                }
#else
                // AArch32: hand-scheduled equivalent of the intrinsics above
                if (nn > 0)
                {
                    asm volatile(
                        "pld [%5, #192] \n"
                        "vld1.f32 {d16-d18}, [%5 :64] \n"// r0
                        "add %5, #16 \n"
                        "pld [%8, #192] \n"
                        "vld1.f32 {d28-d30}, [%8] \n"// r3
                        "add %8, #16 \n"
                        "vext.32 q10, q8, q9, #1 \n"
                        "vext.32 q11, q14, q15, #2 \n"
                        "0: \n"
                        "pld [%1, #128] \n"
                        "vld1.f32 {d12-d13}, [%1 :64] \n"// _sum0
                        "pld [%2, #128] \n"
                        "vld1.f32 {d14-d15}, [%2 :64] \n"// _sum1
                        "vmla.f32 q6, q8, %e18[0] \n"
                        "vmla.f32 q7, q8, %e21[0] \n"
                        "pld [%3, #128] \n"
                        "vld1.f32 {d24-d25}, [%3] \n"// _sum0n
                        "pld [%4, #128] \n"
                        "vld1.f32 {d26-d27}, [%4] \n"// _sum1n
                        "vmla.f32 q12, q14, %e20[0] \n"
                        "vmla.f32 q13, q14, %e23[0] \n"
                        "vext.32 q8, q8, q9, #2 \n"
                        "vext.32 q9, q14, q15, #1 \n"
                        "vmla.f32 q6, q10, %e18[1] \n"
                        "vmla.f32 q7, q10, %e21[1] \n"
                        "vmla.f32 q12, q11, %f20[0] \n"
                        "vmla.f32 q13, q11, %f23[0] \n"
                        "pld [%6, #192] \n"
                        "vld1.f32 {d28-d30}, [%6] \n"// r1
                        "add %6, #16 \n"
                        "vmla.f32 q6, q8, %f18[0] \n"
                        "vmla.f32 q7, q8, %f21[0] \n"
                        "vmla.f32 q12, q9, %e20[1] \n"
                        "vmla.f32 q13, q9, %e23[1] \n"
                        "vext.32 q10, q14, q15, #1 \n"
                        "vmla.f32 q6, q14, %e19[0] \n"
                        "vmla.f32 q7, q14, %e22[0] \n"
                        "vmla.f32 q12, q14, %e18[0] \n"
                        "vmla.f32 q13, q14, %e21[0] \n"
                        "vext.32 q11, q14, q15, #2 \n"
                        "vmla.f32 q6, q10, %e19[1] \n"
                        "vmla.f32 q7, q10, %e22[1] \n"
                        "vmla.f32 q12, q10, %e18[1] \n"
                        "vmla.f32 q13, q10, %e21[1] \n"
                        "pld [%7, #192] \n"
                        "vld1.f32 {d16-d18}, [%7 :64] \n"// r2
                        "add %7, #16 \n"
                        "vmla.f32 q6, q11, %f19[0] \n"
                        "vmla.f32 q7, q11, %f22[0] \n"
                        "vmla.f32 q12, q11, %f18[0] \n"
                        "vmla.f32 q13, q11, %f21[0] \n"
                        "vext.32 q10, q8, q9, #1 \n"
                        "vmla.f32 q6, q8, %e20[0] \n"
                        "vmla.f32 q7, q8, %e23[0] \n"
                        "vmla.f32 q12, q8, %e19[0] \n"
                        "vmla.f32 q13, q8, %e22[0] \n"
                        "vext.32 q11, q8, q9, #2 \n"
                        "vmla.f32 q6, q10, %e20[1] \n"
                        "vmla.f32 q7, q10, %e23[1] \n"
                        "vmla.f32 q12, q10, %e19[1] \n"
                        "vmla.f32 q13, q10, %e22[1] \n"
                        "pld [%5, #192] \n"
                        "vld1.f32 {d16-d18}, [%5 :64] \n"// r0
                        "add %5, #16 \n"
                        "vmla.f32 q6, q11, %f20[0] \n"
                        "vmla.f32 q7, q11, %f23[0] \n"
                        "vmla.f32 q12, q11, %f19[0] \n"
                        "vmla.f32 q13, q11, %f22[0] \n"
                        "pld [%8, #192] \n"
                        "vld1.f32 {d28-d30}, [%8] \n"// r3
                        "add %8, #16 \n"
                        "vext.32 q10, q8, q9, #1 \n"
                        "vst1.f32 {d12-d13}, [%1 : 64]!\n"
                        "vst1.f32 {d14-d15}, [%2 : 64]!\n"
                        "vext.32 q11, q14, q15, #2 \n"
                        "vst1.f32 {d24-d25}, [%3]! \n"
                        "vst1.f32 {d26-d27}, [%4]! \n"
                        "subs %0, #1 \n"
                        "bne 0b \n"
                        "sub %5, #16 \n"
                        "sub %8, #16 \n"
                        : "=r"(nn), // %0
                        "=r"(outptr0), // %1
                        "=r"(outptr1), // %2
                        "=r"(outptr0n), // %3
                        "=r"(outptr1n), // %4
                        "=r"(r0), // %5
                        "=r"(r1), // %6
                        "=r"(r2), // %7
                        "=r"(r3) // %8
                        : "0"(nn),
                        "1"(outptr0),
                        "2"(outptr1),
                        "3"(outptr0n),
                        "4"(outptr1n),
                        "5"(r0),
                        "6"(r1),
                        "7"(r2),
                        "8"(r3),
                        "w"(_k00), // %18
                        "w"(_k03), // %19
                        "w"(_k06), // %20
                        "w"(_k10), // %21
                        "w"(_k13), // %22
                        "w"(_k16) // %23
                        : "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                    );
                }
#endif // __aarch64__
#endif // __ARM_NEON
                // scalar tail: 0-3 leftover pixels on this row pair
                for (; remain>0; remain--)
                {
#if __ARM_NEON
                    // 4-lane dot products; lane 3 is repurposed to carry the
                    // running output value into the horizontal add
                    float32x4_t _r00 = vld1q_f32(r0);
                    float32x4_t _r10 = vld1q_f32(r1);
                    float32x4_t _r20 = vld1q_f32(r2);
                    float32x4_t _r30 = vld1q_f32(r3);
                    float32x4_t _sum0 = vmulq_f32(_r00, _k00);
                    float32x4_t _sum1 = vmulq_f32(_r00, _k10);
                    _sum0 = vmlaq_f32(_sum0, _r10, _k03);
                    _sum1 = vmlaq_f32(_sum1, _r10, _k13);
                    _sum0 = vmlaq_f32(_sum0, _r20, _k06);
                    _sum1 = vmlaq_f32(_sum1, _r20, _k16);
                    float32x4_t _sum0n = vmulq_f32(_r10, _k00);
                    float32x4_t _sum1n = vmulq_f32(_r10, _k10);
                    _sum0n = vmlaq_f32(_sum0n, _r20, _k03);
                    _sum1n = vmlaq_f32(_sum1n, _r20, _k13);
                    _sum0n = vmlaq_f32(_sum0n, _r30, _k06);
                    _sum1n = vmlaq_f32(_sum1n, _r30, _k16);
                    _sum0 = vsetq_lane_f32(*outptr0, _sum0, 3);
                    _sum1 = vsetq_lane_f32(*outptr1, _sum1, 3);
                    _sum0n = vsetq_lane_f32(*outptr0n, _sum0n, 3);
                    _sum1n = vsetq_lane_f32(*outptr1n, _sum1n, 3);
#if __aarch64__
                    *outptr0 = vaddvq_f32(_sum0);
                    *outptr1 = vaddvq_f32(_sum1);
                    *outptr0n = vaddvq_f32(_sum0n);
                    *outptr1n = vaddvq_f32(_sum1n);
#else
                    float32x2_t _ss0 = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0));
                    float32x2_t _ss1 = vadd_f32(vget_low_f32(_sum1), vget_high_f32(_sum1));
                    float32x2_t _ss0n = vadd_f32(vget_low_f32(_sum0n), vget_high_f32(_sum0n));
                    float32x2_t _ss1n = vadd_f32(vget_low_f32(_sum1n), vget_high_f32(_sum1n));
                    float32x2_t _ss01 = vpadd_f32(_ss0, _ss1);
                    float32x2_t _ss01n = vpadd_f32(_ss0n, _ss1n);
                    *outptr0 = vget_lane_f32(_ss01, 0);
                    *outptr1 = vget_lane_f32(_ss01, 1);
                    *outptr0n = vget_lane_f32(_ss01n, 0);
                    *outptr1n = vget_lane_f32(_ss01n, 1);
#endif // __aarch64__
#else
                    float sum0 = 0.f;
                    float sum0n = 0.f;
                    float sum1 = 0.f;
                    float sum1n = 0.f;
                    sum0 += r0[0] * k0[0];
                    sum0 += r0[1] * k0[1];
                    sum0 += r0[2] * k0[2];
                    sum0 += r1[0] * k0[3];
                    sum0 += r1[1] * k0[4];
                    sum0 += r1[2] * k0[5];
                    sum0 += r2[0] * k0[6];
                    sum0 += r2[1] * k0[7];
                    sum0 += r2[2] * k0[8];
                    sum1 += r0[0] * k1[0];
                    sum1 += r0[1] * k1[1];
                    sum1 += r0[2] * k1[2];
                    sum1 += r1[0] * k1[3];
                    sum1 += r1[1] * k1[4];
                    sum1 += r1[2] * k1[5];
                    sum1 += r2[0] * k1[6];
                    sum1 += r2[1] * k1[7];
                    sum1 += r2[2] * k1[8];
                    sum0n += r1[0] * k0[0];
                    sum0n += r1[1] * k0[1];
                    sum0n += r1[2] * k0[2];
                    sum0n += r2[0] * k0[3];
                    sum0n += r2[1] * k0[4];
                    sum0n += r2[2] * k0[5];
                    sum0n += r3[0] * k0[6];
                    sum0n += r3[1] * k0[7];
                    sum0n += r3[2] * k0[8];
                    sum1n += r1[0] * k1[0];
                    sum1n += r1[1] * k1[1];
                    sum1n += r1[2] * k1[2];
                    sum1n += r2[0] * k1[3];
                    sum1n += r2[1] * k1[4];
                    sum1n += r2[2] * k1[5];
                    sum1n += r3[0] * k1[6];
                    sum1n += r3[1] * k1[7];
                    sum1n += r3[2] * k1[8];
                    *outptr0 += sum0;
                    *outptr1 += sum1;
                    *outptr0n += sum0n;
                    *outptr1n += sum1n;
#endif // __ARM_NEON
                    r0++;
                    r1++;
                    r2++;
                    r3++;
                    outptr0++;
                    outptr1++;
                    outptr0n++;
                    outptr1n++;
                }
                // advance past the 2-pixel border and skip the row already
                // covered by the paired pass
                r0 += 2 + w;
                r1 += 2 + w;
                r2 += 2 + w;
                r3 += 2 + w;
                outptr0 += outw;
                outptr1 += outw;
                outptr0n += outw;
                outptr1n += outw;
            }
            // tail: odd last output row (single-row pass)
            for (; i < outh; i++)
            {
#if __ARM_NEON
                int nn = outw >> 2;
                int remain = outw & 3;
#else
                int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
                for (; nn>0; nn--)
                {
                    float32x4_t _sum0 = vld1q_f32(outptr0);
                    float32x4_t _sum1 = vld1q_f32(outptr1);
                    float32x4_t _r00 = vld1q_f32(r0);
                    float32x4_t _r00n = vld1q_f32(r0 + 4);
                    float32x4_t _r01 = vextq_f32(_r00, _r00n, 1);
                    float32x4_t _r02 = vextq_f32(_r00, _r00n, 2);
                    float32x4_t _r10 = vld1q_f32(r1);
                    float32x4_t _r10n = vld1q_f32(r1 + 4);
                    float32x4_t _r11 = vextq_f32(_r10, _r10n, 1);
                    float32x4_t _r12 = vextq_f32(_r10, _r10n, 2);
                    float32x4_t _r20 = vld1q_f32(r2);
                    float32x4_t _r20n = vld1q_f32(r2 + 4);
                    float32x4_t _r21 = vextq_f32(_r20, _r20n, 1);
                    float32x4_t _r22 = vextq_f32(_r20, _r20n, 2);
                    _sum0 = vfmaq_laneq_f32(_sum0, _r00, _k00, 0);
                    _sum0 = vfmaq_laneq_f32(_sum0, _r01, _k00, 1);
                    _sum0 = vfmaq_laneq_f32(_sum0, _r02, _k00, 2);
                    _sum0 = vfmaq_laneq_f32(_sum0, _r10, _k03, 0);
                    _sum0 = vfmaq_laneq_f32(_sum0, _r11, _k03, 1);
                    _sum0 = vfmaq_laneq_f32(_sum0, _r12, _k03, 2);
                    _sum0 = vfmaq_laneq_f32(_sum0, _r20, _k06, 0);
                    _sum0 = vfmaq_laneq_f32(_sum0, _r21, _k06, 1);
                    _sum0 = vfmaq_laneq_f32(_sum0, _r22, _k06, 2);
                    _sum1 = vfmaq_laneq_f32(_sum1, _r00, _k10, 0);
                    _sum1 = vfmaq_laneq_f32(_sum1, _r01, _k10, 1);
                    _sum1 = vfmaq_laneq_f32(_sum1, _r02, _k10, 2);
                    _sum1 = vfmaq_laneq_f32(_sum1, _r10, _k13, 0);
                    _sum1 = vfmaq_laneq_f32(_sum1, _r11, _k13, 1);
                    _sum1 = vfmaq_laneq_f32(_sum1, _r12, _k13, 2);
                    _sum1 = vfmaq_laneq_f32(_sum1, _r20, _k16, 0);
                    _sum1 = vfmaq_laneq_f32(_sum1, _r21, _k16, 1);
                    _sum1 = vfmaq_laneq_f32(_sum1, _r22, _k16, 2);
                    vst1q_f32(outptr0, _sum0);
                    vst1q_f32(outptr1, _sum1);
                    r0 += 4;
                    r1 += 4;
                    r2 += 4;
                    outptr0 += 4;
                    outptr1 += 4;
                }
#else
                if (nn > 0)
                {
                    asm volatile(
                        "0: \n"
                        "pld [%3, #192] \n"
                        "vld1.f32 {d16-d18}, [%3] \n"// r0
                        "add %3, #16 \n"
                        "pld [%1, #128] \n"
                        "vld1.f32 {d12-d13}, [%1] \n"// _sum0
                        "pld [%2, #128] \n"
                        "vld1.f32 {d14-d15}, [%2] \n"// _sum1
                        "vmul.f32 q14, q8, %e12[0] \n"
                        "vmul.f32 q15, q8, %e15[0] \n"
                        "vext.32 q10, q8, q9, #1 \n"
                        "vext.32 q11, q8, q9, #2 \n"
                        "vmla.f32 q6, q10, %e12[1] \n"
                        "vmla.f32 q7, q10, %e15[1] \n"
                        "pld [%4, #192] \n"
                        "vld1.f32 {d16-d18}, [%4] \n"// r1
                        "add %4, #16 \n"
                        "vmla.f32 q14, q11, %f12[0] \n"
                        "vmla.f32 q15, q11, %f15[0] \n"
                        "vmla.f32 q6, q8, %e13[0] \n"
                        "vmla.f32 q7, q8, %e16[0] \n"
                        "vext.32 q10, q8, q9, #1 \n"
                        "vext.32 q11, q8, q9, #2 \n"
                        "vmla.f32 q14, q10, %e13[1] \n"
                        "vmla.f32 q15, q10, %e16[1] \n"
                        "pld [%5, #192] \n"
                        "vld1.f32 {d16-d18}, [%5] \n"// r2
                        "add %5, #16 \n"
                        "vmla.f32 q6, q11, %f13[0] \n"
                        "vmla.f32 q7, q11, %f16[0] \n"
                        "vmla.f32 q14, q8, %e14[0] \n"
                        "vmla.f32 q15, q8, %e17[0] \n"
                        "vext.32 q10, q8, q9, #1 \n"
                        "vext.32 q11, q8, q9, #2 \n"
                        "vmla.f32 q6, q10, %e14[1] \n"
                        "vmla.f32 q7, q10, %e17[1] \n"
                        "vmla.f32 q14, q11, %f14[0] \n"
                        "vmla.f32 q15, q11, %f17[0] \n"
                        "vadd.f32 q6, q6, q14 \n"
                        "vadd.f32 q7, q7, q15 \n"
                        "vst1.f32 {d12-d13}, [%1]! \n"
                        "vst1.f32 {d14-d15}, [%2]! \n"
                        "subs %0, #1 \n"
                        "bne 0b \n"
                        : "=r"(nn), // %0
                        "=r"(outptr0), // %1
                        "=r"(outptr1), // %2
                        "=r"(r0), // %3
                        "=r"(r1), // %4
                        "=r"(r2) // %5
                        : "0"(nn),
                        "1"(outptr0),
                        "2"(outptr1),
                        "3"(r0),
                        "4"(r1),
                        "5"(r2),
                        "w"(_k00), // %12
                        "w"(_k03), // %13
                        "w"(_k06), // %14
                        "w"(_k10), // %15
                        "w"(_k13), // %16
                        "w"(_k16) // %17
                        : "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                    );
                }
#endif // __aarch64__
#endif // __ARM_NEON
                for (; remain>0; remain--)
                {
#if __ARM_NEON
                    float32x4_t _r00 = vld1q_f32(r0);
                    float32x4_t _r10 = vld1q_f32(r1);
                    float32x4_t _r20 = vld1q_f32(r2);
                    float32x4_t _sum0 = vmulq_f32(_r00, _k00);
                    float32x4_t _sum1 = vmulq_f32(_r00, _k10);
                    _sum0 = vmlaq_f32(_sum0, _r10, _k03);
                    _sum1 = vmlaq_f32(_sum1, _r10, _k13);
                    _sum0 = vmlaq_f32(_sum0, _r20, _k06);
                    _sum1 = vmlaq_f32(_sum1, _r20, _k16);
                    _sum0 = vsetq_lane_f32(*outptr0, _sum0, 3);
                    _sum1 = vsetq_lane_f32(*outptr1, _sum1, 3);
#if __aarch64__
                    *outptr0 = vaddvq_f32(_sum0);
                    *outptr1 = vaddvq_f32(_sum1);
#else
                    float32x2_t _ss0 = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0));
                    float32x2_t _ss1 = vadd_f32(vget_low_f32(_sum1), vget_high_f32(_sum1));
                    float32x2_t _ss01 = vpadd_f32(_ss0, _ss1);
                    *outptr0 = vget_lane_f32(_ss01, 0);
                    *outptr1 = vget_lane_f32(_ss01, 1);
#endif // __aarch64__
#else
                    float sum0 = 0.f;
                    float sum1 = 0.f;
                    sum0 += r0[0] * k0[0];
                    sum0 += r0[1] * k0[1];
                    sum0 += r0[2] * k0[2];
                    sum0 += r1[0] * k0[3];
                    sum0 += r1[1] * k0[4];
                    sum0 += r1[2] * k0[5];
                    sum0 += r2[0] * k0[6];
                    sum0 += r2[1] * k0[7];
                    sum0 += r2[2] * k0[8];
                    sum1 += r0[0] * k1[0];
                    sum1 += r0[1] * k1[1];
                    sum1 += r0[2] * k1[2];
                    sum1 += r1[0] * k1[3];
                    sum1 += r1[1] * k1[4];
                    sum1 += r1[2] * k1[5];
                    sum1 += r2[0] * k1[6];
                    sum1 += r2[1] * k1[7];
                    sum1 += r2[2] * k1[8];
                    *outptr0 += sum0;
                    *outptr1 += sum1;
#endif // __ARM_NEON
                    r0++;
                    r1++;
                    r2++;
                    outptr0++;
                    outptr1++;
                }
                // skip the 2-pixel right border of the input row
                r0 += 2;
                r1 += 2;
                r2 += 2;
            }
            // next input channel's two 3x3 filters
            k0 += 9;
            k1 += 9;
        }
    }
    // tail: leftover single output channel when outch is odd
#pragma omp parallel for
    for (int p=remain_outch_start; p<outch; p++)
    {
        Mat out = top_blob.channel(p);
        const float bias0 = bias ? bias[p] : 0.f;
        out.fill(bias0);
        const float* kernel0 = kernel + p*inch*9;
        for (int q=0; q<inch; q++)
        {
            float* outptr = out;
            float* outptr2 = outptr + outw;  // paired second output row
            const float* img0 = bottom_blob.channel(q);
            const float* r0 = img0;
            const float* r1 = img0 + w;
            const float* r2 = img0 + w*2;
            const float* r3 = img0 + w*3;
#if __ARM_NEON
            float32x4_t _k0123 = vld1q_f32(kernel0);
            float32x4_t _k3456 = vld1q_f32(kernel0+3);
            float32x4_t _k6789 = vld1q_f32(kernel0+6);
#else
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;
#endif // __ARM_NEON
            int i = 0;
            for (; i+1 < outh; i+=2)
            {
#if __ARM_NEON
                int nn = outw >> 2;
                int remain = outw & 3;
#else
                int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
                for (; nn>0; nn--)
                {
                    float32x4_t _sum1 = vld1q_f32(outptr);
                    float32x4_t _sum3 = vld1q_f32(outptr2);
                    float32x4_t _r00 = vld1q_f32(r0);
                    float32x4_t _r00n = vld1q_f32(r0 + 4);
                    float32x4_t _r01 = vextq_f32(_r00, _r00n, 1);
                    float32x4_t _r02 = vextq_f32(_r00, _r00n, 2);
                    float32x4_t _r10 = vld1q_f32(r1);
                    float32x4_t _r10n = vld1q_f32(r1 + 4);
                    float32x4_t _r11 = vextq_f32(_r10, _r10n, 1);
                    float32x4_t _r12 = vextq_f32(_r10, _r10n, 2);
                    float32x4_t _r20 = vld1q_f32(r2);
                    float32x4_t _r20n = vld1q_f32(r2 + 4);
                    float32x4_t _r21 = vextq_f32(_r20, _r20n, 1);
                    float32x4_t _r22 = vextq_f32(_r20, _r20n, 2);
                    float32x4_t _r30 = vld1q_f32(r3);
                    float32x4_t _r30n = vld1q_f32(r3 + 4);
                    float32x4_t _r31 = vextq_f32(_r30, _r30n, 1);
                    float32x4_t _r32 = vextq_f32(_r30, _r30n, 2);
                    // two accumulators per row to break the dependency chain
                    _sum1 = vfmaq_laneq_f32(_sum1, _r00, _k0123, 0);
                    float32x4_t _sum2 = vmulq_laneq_f32(_r01, _k0123, 1);
                    _sum1 = vfmaq_laneq_f32(_sum1, _r02, _k0123, 2);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r10, _k3456, 0);
                    _sum1 = vfmaq_laneq_f32(_sum1, _r11, _k3456, 1);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r12, _k3456, 2);
                    _sum1 = vfmaq_laneq_f32(_sum1, _r20, _k6789, 0);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r21, _k6789, 1);
                    _sum1 = vfmaq_laneq_f32(_sum1, _r22, _k6789, 2);
                    _sum3 = vfmaq_laneq_f32(_sum3, _r10, _k0123, 0);
                    float32x4_t _sum4 = vmulq_laneq_f32(_r11, _k0123, 1);
                    _sum3 = vfmaq_laneq_f32(_sum3, _r12, _k0123, 2);
                    _sum4 = vfmaq_laneq_f32(_sum4, _r20, _k3456, 0);
                    _sum3 = vfmaq_laneq_f32(_sum3, _r21, _k3456, 1);
                    _sum4 = vfmaq_laneq_f32(_sum4, _r22, _k3456, 2);
                    _sum3 = vfmaq_laneq_f32(_sum3, _r30, _k6789, 0);
                    _sum4 = vfmaq_laneq_f32(_sum4, _r31, _k6789, 1);
                    _sum3 = vfmaq_laneq_f32(_sum3, _r32, _k6789, 2);
                    _sum1 = vaddq_f32(_sum1, _sum2);
                    _sum3 = vaddq_f32(_sum3, _sum4);
                    vst1q_f32(outptr, _sum1);
                    vst1q_f32(outptr2, _sum3);
                    r0 += 4;
                    r1 += 4;
                    r2 += 4;
                    r3 += 4;
                    outptr += 4;
                    outptr2 += 4;
                }
#else
                if (nn > 0)
                {
                    asm volatile(
                        "pld [%3, #192] \n"
                        "vld1.f32 {d18-d20}, [%3 :64] \n"// r0
                        "add %3, #16 \n"
                        "vext.32 q11, q9, q10, #1 \n"
                        "vext.32 q12, q9, q10, #2 \n"
                        "0: \n"
                        "pld [%1, #128] \n"
                        "vld1.f32 {d14-d15}, [%1 :64] \n"// _sum
                        "vmla.f32 q7, q9, %e14[0] \n"
                        "vmul.f32 q6, q11, %e14[1] \n"
                        "vmul.f32 q13, q12, %f14[0] \n"
                        "pld [%4, #192] \n"
                        "vld1.f32 {d18-d20}, [%4] \n"// r1
                        "add %4, #16 \n"
                        "vmla.f32 q7, q9, %e15[0] \n"
                        "vext.32 q11, q9, q10, #1 \n"
                        "vext.32 q12, q9, q10, #2 \n"
                        "vmla.f32 q6, q11, %e15[1] \n"
                        "vmla.f32 q13, q12, %f15[0] \n"
                        "pld [%2, #128] \n"
                        "vld1.f32 {d16-d17}, [%2] \n"// _sum2
                        "vmla.f32 q8, q9, %e14[0] \n"
                        "vmul.f32 q14, q11, %e14[1] \n"
                        "vmul.f32 q15, q12, %f14[0] \n"
                        "pld [%5, #192] \n"
                        "vld1.f32 {d18-d20}, [%5 :64] \n"// r2
                        "add %5, #16 \n"
                        "vmla.f32 q7, q9, %e16[0] \n"
                        "vext.32 q11, q9, q10, #1 \n"
                        "vext.32 q12, q9, q10, #2 \n"
                        "vmla.f32 q6, q11, %e16[1] \n"
                        "vmla.f32 q13, q12, %f16[0] \n"
                        "vmla.f32 q8, q9, %e15[0] \n"
                        "vmla.f32 q14, q11, %e15[1] \n"
                        "vmla.f32 q15, q12, %f15[0] \n"
                        "pld [%6, #192] \n"
                        "vld1.f32 {d18-d20}, [%6] \n"// r3
                        "add %6, #16 \n"
                        "vmla.f32 q8, q9, %e16[0] \n"
                        "vext.32 q11, q9, q10, #1 \n"
                        "vext.32 q12, q9, q10, #2 \n"
                        "vmla.f32 q14, q11, %e16[1] \n"
                        "vmla.f32 q15, q12, %f16[0] \n"
                        "vadd.f32 q7, q7, q6 \n"
                        "pld [%3, #192] \n"
                        "vld1.f32 {d18-d20}, [%3 :64] \n"// r0
                        "vadd.f32 q8, q8, q14 \n"
                        "vadd.f32 q7, q7, q13 \n"
                        "vadd.f32 q8, q8, q15 \n"
                        "vext.32 q11, q9, q10, #1 \n"
                        "vext.32 q12, q9, q10, #2 \n"
                        "add %3, #16 \n"
                        "vst1.f32 {d14-d15}, [%1]! \n"
                        "vst1.f32 {d16-d17}, [%2]! \n"
                        "subs %0, #1 \n"
                        "bne 0b \n"
                        "sub %3, #16 \n"
                        : "=r"(nn), // %0
                        "=r"(outptr), // %1
                        "=r"(outptr2), // %2
                        "=r"(r0), // %3
                        "=r"(r1), // %4
                        "=r"(r2), // %5
                        "=r"(r3) // %6
                        : "0"(nn),
                        "1"(outptr),
                        "2"(outptr2),
                        "3"(r0),
                        "4"(r1),
                        "5"(r2),
                        "6"(r3),
                        "w"(_k0123), // %14
                        "w"(_k3456), // %15
                        "w"(_k6789) // %16
                        : "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                    );
                }
#endif // __aarch64__
#endif // __ARM_NEON
                for (; remain>0; remain--)
                {
#if __ARM_NEON
                    float32x4_t _r00 = vld1q_f32(r0);
                    float32x4_t _r10 = vld1q_f32(r1);
                    float32x4_t _r20 = vld1q_f32(r2);
                    float32x4_t _r30 = vld1q_f32(r3);
                    float32x4_t _sum = vmulq_f32(_r00, _k0123);
                    _sum = vmlaq_f32(_sum, _r10, _k3456);
                    _sum = vmlaq_f32(_sum, _r20, _k6789);
                    float32x4_t _sum2 = vmulq_f32(_r10, _k0123);
                    _sum2 = vmlaq_f32(_sum2, _r20, _k3456);
                    _sum2 = vmlaq_f32(_sum2, _r30, _k6789);
                    _sum = vsetq_lane_f32(*outptr, _sum, 3);
                    _sum2 = vsetq_lane_f32(*outptr2, _sum2, 3);
#if __aarch64__
                    *outptr = vaddvq_f32(_sum);
                    *outptr2 = vaddvq_f32(_sum2);
#else
                    float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
                    float32x2_t _ss2 = vadd_f32(vget_low_f32(_sum2), vget_high_f32(_sum2));
                    float32x2_t _sss2 = vpadd_f32(_ss, _ss2);
                    *outptr = vget_lane_f32(_sss2, 0);
                    *outptr2 = vget_lane_f32(_sss2, 1);
#endif // __aarch64__
#else
                    float sum = 0;
                    float sum2 = 0;
                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];
                    sum2 += r1[0] * k0[0];
                    sum2 += r1[1] * k0[1];
                    sum2 += r1[2] * k0[2];
                    sum2 += r2[0] * k1[0];
                    sum2 += r2[1] * k1[1];
                    sum2 += r2[2] * k1[2];
                    sum2 += r3[0] * k2[0];
                    sum2 += r3[1] * k2[1];
                    sum2 += r3[2] * k2[2];
                    *outptr += sum;
                    *outptr2 += sum2;
#endif
                    r0++;
                    r1++;
                    r2++;
                    r3++;
                    outptr++;
                    outptr2++;
                }
                r0 += 2 + w;
                r1 += 2 + w;
                r2 += 2 + w;
                r3 += 2 + w;
                outptr += outw;
                outptr2 += outw;
            }
            // tail: odd last output row
            for (; i < outh; i++)
            {
#if __ARM_NEON
                int nn = outw >> 2;
                int remain = outw & 3;
#else
                int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
                for (; nn>0; nn--)
                {
                    float32x4_t _sum1 = vld1q_f32(outptr);
                    float32x4_t _r00 = vld1q_f32(r0);
                    float32x4_t _r00n = vld1q_f32(r0 + 4);
                    float32x4_t _r01 = vextq_f32(_r00, _r00n, 1);
                    float32x4_t _r02 = vextq_f32(_r00, _r00n, 2);
                    float32x4_t _r10 = vld1q_f32(r1);
                    float32x4_t _r10n = vld1q_f32(r1 + 4);
                    float32x4_t _r11 = vextq_f32(_r10, _r10n, 1);
                    float32x4_t _r12 = vextq_f32(_r10, _r10n, 2);
                    float32x4_t _r20 = vld1q_f32(r2);
                    float32x4_t _r20n = vld1q_f32(r2 + 4);
                    float32x4_t _r21 = vextq_f32(_r20, _r20n, 1);
                    float32x4_t _r22 = vextq_f32(_r20, _r20n, 2);
                    _sum1 = vfmaq_laneq_f32(_sum1, _r00, _k0123, 0);
                    float32x4_t _sum2 = vmulq_laneq_f32(_r01, _k0123, 1);
                    _sum1 = vfmaq_laneq_f32(_sum1, _r02, _k0123, 2);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r10, _k3456, 0);
                    _sum1 = vfmaq_laneq_f32(_sum1, _r11, _k3456, 1);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r12, _k3456, 2);
                    _sum1 = vfmaq_laneq_f32(_sum1, _r20, _k6789, 0);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r21, _k6789, 1);
                    _sum1 = vfmaq_laneq_f32(_sum1, _r22, _k6789, 2);
                    _sum1 = vaddq_f32(_sum1, _sum2);
                    vst1q_f32(outptr, _sum1);
                    r0 += 4;
                    r1 += 4;
                    r2 += 4;
                    outptr += 4;
                }
#else
                if (nn > 0)
                {
                    asm volatile(
                        "pld [%2, #192] \n"
                        "vld1.f32 {d16-d18}, [%2] \n"// r0
                        "add %2, #16 \n"
                        "vext.32 q10, q8, q9, #1 \n"
                        "vext.32 q11, q8, q9, #2 \n"
                        "0: \n"
                        "pld [%1, #128] \n"
                        "vld1.f32 {d14-d15}, [%1] \n"// _sum
                        "vmla.f32 q7, q8, %e10[0] \n"
                        "vmul.f32 q13, q10, %e10[1] \n"
                        "vmul.f32 q14, q11, %f10[0] \n"
                        "pld [%3, #192] \n"
                        "vld1.f32 {d16-d18}, [%3] \n"// r1
                        "add %3, #16 \n"
                        "vmla.f32 q7, q8, %e11[0] \n"
                        "vext.32 q10, q8, q9, #1 \n"
                        "vext.32 q11, q8, q9, #2 \n"
                        "vmla.f32 q13, q10, %e11[1] \n"
                        "vmla.f32 q14, q11, %f11[0] \n"
                        "pld [%4, #192] \n"
                        "vld1.f32 {d16-d18}, [%4] \n"// r2
                        "add %4, #16 \n"
                        "vmla.f32 q7, q8, %e12[0] \n"
                        "vext.32 q10, q8, q9, #1 \n"
                        "vext.32 q11, q8, q9, #2 \n"
                        "vmla.f32 q13, q10, %e12[1] \n"
                        "vmla.f32 q14, q11, %f12[0] \n"
                        "pld [%2, #192] \n"
                        "vld1.f32 {d16-d18}, [%2] \n"// r0
                        "add %2, #16 \n"
                        "vadd.f32 q7, q7, q13 \n"
                        "vadd.f32 q7, q7, q14 \n"
                        "vext.32 q10, q8, q9, #1 \n"
                        "vext.32 q11, q8, q9, #2 \n"
                        "vst1.f32 {d14-d15}, [%1]! \n"
                        "subs %0, #1 \n"
                        "bne 0b \n"
                        "sub %2, #16 \n"
                        : "=r"(nn), // %0
                        "=r"(outptr), // %1
                        "=r"(r0), // %2
                        "=r"(r1), // %3
                        "=r"(r2) // %4
                        : "0"(nn),
                        "1"(outptr),
                        "2"(r0),
                        "3"(r1),
                        "4"(r2),
                        "w"(_k0123), // %10
                        "w"(_k3456), // %11
                        "w"(_k6789) // %12
                        : "cc", "memory", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                    );
                }
#endif // __aarch64__
#endif // __ARM_NEON
                for (; remain>0; remain--)
                {
#if __ARM_NEON
                    float32x4_t _r00 = vld1q_f32(r0);
                    float32x4_t _r10 = vld1q_f32(r1);
                    float32x4_t _r20 = vld1q_f32(r2);
                    float32x4_t _sum = vmulq_f32(_r00, _k0123);
                    _sum = vmlaq_f32(_sum, _r10, _k3456);
                    _sum = vmlaq_f32(_sum, _r20, _k6789);
                    _sum = vsetq_lane_f32(*outptr, _sum, 3);
#if __aarch64__
                    *outptr = vaddvq_f32(_sum);
#else
                    float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
                    _ss = vpadd_f32(_ss, _ss);
                    *outptr = vget_lane_f32(_ss, 0);
#endif // __aarch64__
#else
                    float sum = 0;
                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];
                    *outptr += sum;
#endif
                    r0++;
                    r1++;
                    r2++;
                    outptr++;
                }
                r0 += 2;
                r1 += 2;
                r2 += 2;
            }
            kernel0 += 9;
        }
    }
}
// Transform every 3x3 kernel into its 8x8 Winograd F(6x6,3x3) form:
// kernel_tm[p][q] = G * g * G^T, stored as a 64-float row per (outch, inch)
// pair. Pure precomputation; bottom/top data is untouched.
static void conv3x3s1_winograd64_transform_kernel_neon(const Mat& kernel, Mat& kernel_tm, int inch, int outch)
{
    kernel_tm.create(8*8, inch, outch);

    // rows of the G transform matrix (one 3-tap row per transformed row)
    const float ktm[8][3] = {
        {   1.0f,     0.0f,     0.0f},
        {-2.0f/9, -2.0f/9,  -2.0f/9},
        {-2.0f/9,  2.0f/9,  -2.0f/9},
        {1.0f/90, 1.0f/45,  2.0f/45},
        {1.0f/90, -1.0f/45, 2.0f/45},
        {1.0f/45, 1.0f/90,  1.0f/180},
        {1.0f/45, -1.0f/90, 1.0f/180},
        {   0.0f,     0.0f,     1.0f}
    };

#pragma omp parallel for
    for (int p = 0; p<outch; p++)
    {
        for (int q = 0; q<inch; q++)
        {
            // 3x3 filter for this (output, input) channel pair
            const float* g = (const float*)kernel + (p*inch + q) * 9;
            float* out_tm = kernel_tm.channel(p).row(q);

            // first pass (G * g): one transformed column per kernel column
            float trans[8][3];
            for (int i=0; i<8; i++)
            {
                for (int c=0; c<3; c++)
                {
                    const float* gc = g + c*3;
                    trans[i][c] = gc[0] * ktm[i][0] + gc[1] * ktm[i][1] + gc[2] * ktm[i][2];
                }
            }

            // second pass ((G*g) * G^T), written transposed into the row
            for (int j=0; j<8; j++)
            {
                const float* t = trans[j];
                for (int i=0; i<8; i++)
                {
                    out_tm[j*8 + i] = t[0] * ktm[i][0] + t[1] * ktm[i][1] + t[2] * ktm[i][2];
                }
            }
        }
    }
}
// Winograd F(6x6, 3x3) convolution, stride 1, for float32 data.
// kernel_tm must hold kernels pre-transformed by
// conv3x3s1_winograd64_transform_kernel_neon (64 coefficients per
// inch row, one channel per outch).
// Pipeline: pad input -> 8x8 input transform per tile -> element-wise
// multiply-accumulate over input channels in the transform domain ->
// 8x8 -> 6x6 output transform (+ bias) -> crop back to top_blob size.
static void conv3x3s1_winograd64_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 6n+2
    // Each 8x8 input tile yields a 6x6 output tile, so the working output
    // extent is rounded up to a multiple of 6 and the input gets 2 extra
    // columns/rows of right/bottom overlap (8 = 6 + 3 - 1).
    Mat bottom_blob_bordered = bottom_blob;

    outw = (outw + 5) / 6 * 6;
    outh = (outh + 5) / 6 * 6;

    w = outw + 2;
    h = outh + 2;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f);

    const float* bias = _bias;  // may be a null pointer when the layer has no bias term

    // BEGIN transform input
    // bottom_blob_tm layout: w = 64 transform coefficients, h = number of
    // 8x8 tiles (w_tm/8 * h_tm/8), c = inch.
    Mat bottom_blob_tm;
    {
        int w_tm = outw / 6 * 8;
        int h_tm = outh / 6 * 8;
        bottom_blob_tm.create(8*8, w_tm/8 * h_tm/8, inch);

        // const float itm[8][8] = {
        //     {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f},
        //
        //     {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f},
        //     {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f},
        //
        //     {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f},
        //     {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f},
        //
        //     {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f},
        //     {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f},
        //
        //     {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f}
        // };

        // Factored form of the B^T transform above (fewer multiplies):
        // 0 = r00 - r06 + (r04 - r02) * 5.25
        // 7 = r07 - r01 + (r03 - r05) * 5.25

        // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
        // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)

        // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
        // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)

        // reuse r04 * 1.25
        // reuse r03 * 2.5
        // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
        // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)

        #pragma omp parallel for
        for (int q = 0; q<inch; q++)
        {
            const Mat img0 = bottom_blob_bordered.channel(q);
            Mat img0_tm = bottom_blob_tm.channel(q);

            float tmp[8][8];

            // tile
            for (int i=0; i<h_tm/8; i++)
            {
                for (int j=0; j<w_tm/8; j++)
                {
                    // Tiles overlap by 2: each 8x8 tile starts at a multiple of 6.
                    const float* r0 = img0.row(i * 6) + j * 6;
                    float* r0_tm = img0_tm.row(i * w_tm/8 + j);

                    // TODO neon optimize
                    // First pass: B^T * d, column by column (m walks the 8 columns,
                    // r0 steps down the image rows via += w).
                    for (int m=0; m<8; m++)
                    {
                        tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25;
                        tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25;

                        float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25);
                        float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25);

                        tmp[1][m] = tmp12a + tmp12b;
                        tmp[2][m] = tmp12a - tmp12b;

                        float tmp34a = (r0[6] + r0[2] * 0.25 - r0[4] * 1.25);
                        float tmp34b = (r0[1] * 0.5 - r0[3] * 2.5 + r0[5] * 2);

                        tmp[3][m] = tmp34a + tmp34b;
                        tmp[4][m] = tmp34a - tmp34b;

                        float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25) * 4);
                        float tmp56b = (r0[1] * 2 - r0[3] * 2.5 + r0[5] * 0.5);

                        tmp[5][m] = tmp56a + tmp56b;
                        tmp[6][m] = tmp56a - tmp56b;

                        r0 += w;
                    }

                    // Second pass: (B^T * d) * B, row by row, writing the 64
                    // coefficients of this tile into r0_tm.
                    for (int m=0; m<8; m++)
                    {
                        const float* tmp0 = tmp[m];

                        r0_tm[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25;
                        r0_tm[7] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25;

                        float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25);
                        float tmp12b = (tmp0[1] - tmp0[3] * 4.25 + tmp0[5]);

                        r0_tm[1] = tmp12a + tmp12b;
                        r0_tm[2] = tmp12a - tmp12b;

                        float tmp34a = (tmp0[6] + tmp0[2] * 0.25 - tmp0[4] * 1.25);
                        float tmp34b = (tmp0[1] * 0.5 - tmp0[3] * 2.5 + tmp0[5] * 2);

                        r0_tm[3] = tmp34a + tmp34b;
                        r0_tm[4] = tmp34a - tmp34b;

                        float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25) * 4);
                        float tmp56b = (tmp0[1] * 2 - tmp0[3] * 2.5 + tmp0[5] * 0.5);

                        r0_tm[5] = tmp56a + tmp56b;
                        r0_tm[6] = tmp56a - tmp56b;

                        r0_tm += 8;
                    }
                }
            }
        }
    }
    bottom_blob_bordered = Mat();  // release the padded input early
    // END transform input

    // BEGIN dot
    // Element-wise multiply-accumulate in the transform domain:
    // top_blob_tm[p][tile] = sum_q bottom_blob_tm[q][tile] .* kernel_tm[p].row(q)
    // Output channels are processed 4 at a time, input channels 4 at a time.
    Mat top_blob_tm;
    {
        int w_tm = outw / 6 * 8;
        int h_tm = outh / 6 * 8;
        top_blob_tm.create(8*8, w_tm/8 * h_tm/8, outch);

        int nn_outch = outch >> 2;
        int remain_outch_start = nn_outch << 2;

        #pragma omp parallel for
        for (int pp=0; pp<nn_outch; pp++)
        {
            int p = pp * 4;

            Mat out0_tm = top_blob_tm.channel(p);
            Mat out1_tm = top_blob_tm.channel(p+1);
            Mat out2_tm = top_blob_tm.channel(p+2);
            Mat out3_tm = top_blob_tm.channel(p+3);

            const Mat kernel0_tm = kernel_tm.channel(p);
            const Mat kernel1_tm = kernel_tm.channel(p+1);
            const Mat kernel2_tm = kernel_tm.channel(p+2);
            const Mat kernel3_tm = kernel_tm.channel(p+3);

            out0_tm.fill(0.f);
            out1_tm.fill(0.f);
            out2_tm.fill(0.f);
            out3_tm.fill(0.f);

            int q = 0;
            // Main loop: 4 input channels per pass.
            for (; q+3<inch; q+=4)
            {
                const float* r0 = bottom_blob_tm.channel(q);
                const float* r1 = bottom_blob_tm.channel(q+1);
                const float* r2 = bottom_blob_tm.channel(q+2);
                const float* r3 = bottom_blob_tm.channel(q+3);

                // k00..k03 (etc.) live 64 floats apart: each kernel row holds
                // the 64 coefficients for one input channel, and rows q..q+3
                // are reached via "+= 64" below (assumes contiguous rows —
                // holds for the w=64 Mat created by the kernel transform).
                const float* k00 = kernel0_tm.row(q);
                const float* k10 = kernel1_tm.row(q);
                const float* k20 = kernel2_tm.row(q);
                const float* k30 = kernel3_tm.row(q);

                float* output0_tm = out0_tm;
                float* output1_tm = out1_tm;
                float* output2_tm = out2_tm;
                float* output3_tm = out3_tm;

                // tile
                for (int i=0; i<h_tm/8 * w_tm/8; i++)
                {
#if __ARM_NEON
#if __aarch64__
                    // Process the 64 coefficients of one tile in chunks of
                    // 8 floats (two 4-lane halves per iteration). Each k
                    // pointer nets +8 per iteration (+64 after the loop),
                    // undone by the "-= 64" after the #endif.
                    for (int m=0; m+7<64; m+=8)
                    {
                        float32x4_t _output0_tm = vld1q_f32(output0_tm);
                        float32x4_t _output1_tm = vld1q_f32(output1_tm);
                        float32x4_t _output2_tm = vld1q_f32(output2_tm);
                        float32x4_t _output3_tm = vld1q_f32(output3_tm);

                        float32x4_t _r0 = vld1q_f32(r0);
                        float32x4_t _r1 = vld1q_f32(r1);
                        float32x4_t _r2 = vld1q_f32(r2);
                        float32x4_t _r3 = vld1q_f32(r3);

                        // Gather the 4 input-channel rows of kernel 0.
                        float32x4_t _k00 = vld1q_f32(k00);
                        k00 += 64;
                        float32x4_t _k01 = vld1q_f32(k00);
                        k00 += 64;
                        float32x4_t _k02 = vld1q_f32(k00);
                        k00 += 64;
                        float32x4_t _k03 = vld1q_f32(k00);
                        k00 += 64;
                        k00 -= 64*4;

                        _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
                        _output0_tm = vmlaq_f32(_output0_tm, _r1, _k01);
                        _output0_tm = vmlaq_f32(_output0_tm, _r2, _k02);
                        _output0_tm = vmlaq_f32(_output0_tm, _r3, _k03);

                        float32x4_t _k10 = vld1q_f32(k10);
                        k10 += 64;
                        float32x4_t _k11 = vld1q_f32(k10);
                        k10 += 64;
                        float32x4_t _k12 = vld1q_f32(k10);
                        k10 += 64;
                        float32x4_t _k13 = vld1q_f32(k10);
                        k10 += 64;
                        k10 -= 64*4;

                        _output1_tm = vmlaq_f32(_output1_tm, _r0, _k10);
                        _output1_tm = vmlaq_f32(_output1_tm, _r1, _k11);
                        _output1_tm = vmlaq_f32(_output1_tm, _r2, _k12);
                        _output1_tm = vmlaq_f32(_output1_tm, _r3, _k13);

                        float32x4_t _k20 = vld1q_f32(k20);
                        k20 += 64;
                        float32x4_t _k21 = vld1q_f32(k20);
                        k20 += 64;
                        float32x4_t _k22 = vld1q_f32(k20);
                        k20 += 64;
                        float32x4_t _k23 = vld1q_f32(k20);
                        k20 += 64;
                        k20 -= 64*4;

                        _output2_tm = vmlaq_f32(_output2_tm, _r0, _k20);
                        _output2_tm = vmlaq_f32(_output2_tm, _r1, _k21);
                        _output2_tm = vmlaq_f32(_output2_tm, _r2, _k22);
                        _output2_tm = vmlaq_f32(_output2_tm, _r3, _k23);

                        float32x4_t _k30 = vld1q_f32(k30);
                        k30 += 64;
                        float32x4_t _k31 = vld1q_f32(k30);
                        k30 += 64;
                        float32x4_t _k32 = vld1q_f32(k30);
                        k30 += 64;
                        float32x4_t _k33 = vld1q_f32(k30);
                        k30 += 64;
                        k30 -= 64*4;

                        _output3_tm = vmlaq_f32(_output3_tm, _r0, _k30);
                        _output3_tm = vmlaq_f32(_output3_tm, _r1, _k31);
                        _output3_tm = vmlaq_f32(_output3_tm, _r2, _k32);
                        _output3_tm = vmlaq_f32(_output3_tm, _r3, _k33);

                        vst1q_f32(output0_tm, _output0_tm);
                        vst1q_f32(output1_tm, _output1_tm);
                        vst1q_f32(output2_tm, _output2_tm);
                        vst1q_f32(output3_tm, _output3_tm);

                        output0_tm += 4;
                        output1_tm += 4;
                        output2_tm += 4;
                        output3_tm += 4;

                        r0 += 4;
                        r1 += 4;
                        r2 += 4;
                        r3 += 4;

                        k00 += 4;
                        k10 += 4;
                        k20 += 4;
                        k30 += 4;

                        // Second half of the 8-float chunk (same pattern).
                        float32x4_t _output0_tmn = vld1q_f32(output0_tm);
                        float32x4_t _output1_tmn = vld1q_f32(output1_tm);
                        float32x4_t _output2_tmn = vld1q_f32(output2_tm);
                        float32x4_t _output3_tmn = vld1q_f32(output3_tm);

                        float32x4_t _r0n = vld1q_f32(r0);
                        float32x4_t _r1n = vld1q_f32(r1);
                        float32x4_t _r2n = vld1q_f32(r2);
                        float32x4_t _r3n = vld1q_f32(r3);

                        float32x4_t _k00n = vld1q_f32(k00);
                        k00 += 64;
                        float32x4_t _k01n = vld1q_f32(k00);
                        k00 += 64;
                        float32x4_t _k02n = vld1q_f32(k00);
                        k00 += 64;
                        float32x4_t _k03n = vld1q_f32(k00);
                        k00 += 64;
                        k00 -= 64*4;

                        _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n);
                        _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n);
                        _output0_tmn = vmlaq_f32(_output0_tmn, _r2n, _k02n);
                        _output0_tmn = vmlaq_f32(_output0_tmn, _r3n, _k03n);

                        float32x4_t _k10n = vld1q_f32(k10);
                        k10 += 64;
                        float32x4_t _k11n = vld1q_f32(k10);
                        k10 += 64;
                        float32x4_t _k12n = vld1q_f32(k10);
                        k10 += 64;
                        float32x4_t _k13n = vld1q_f32(k10);
                        k10 += 64;
                        k10 -= 64*4;

                        _output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n);
                        _output1_tmn = vmlaq_f32(_output1_tmn, _r1n, _k11n);
                        _output1_tmn = vmlaq_f32(_output1_tmn, _r2n, _k12n);
                        _output1_tmn = vmlaq_f32(_output1_tmn, _r3n, _k13n);

                        float32x4_t _k20n = vld1q_f32(k20);
                        k20 += 64;
                        float32x4_t _k21n = vld1q_f32(k20);
                        k20 += 64;
                        float32x4_t _k22n = vld1q_f32(k20);
                        k20 += 64;
                        float32x4_t _k23n = vld1q_f32(k20);
                        k20 += 64;
                        k20 -= 64*4;

                        _output2_tmn = vmlaq_f32(_output2_tmn, _r0n, _k20n);
                        _output2_tmn = vmlaq_f32(_output2_tmn, _r1n, _k21n);
                        _output2_tmn = vmlaq_f32(_output2_tmn, _r2n, _k22n);
                        _output2_tmn = vmlaq_f32(_output2_tmn, _r3n, _k23n);

                        float32x4_t _k30n = vld1q_f32(k30);
                        k30 += 64;
                        float32x4_t _k31n = vld1q_f32(k30);
                        k30 += 64;
                        float32x4_t _k32n = vld1q_f32(k30);
                        k30 += 64;
                        float32x4_t _k33n = vld1q_f32(k30);
                        k30 += 64;
                        k30 -= 64*4;

                        _output3_tmn = vmlaq_f32(_output3_tmn, _r0n, _k30n);
                        _output3_tmn = vmlaq_f32(_output3_tmn, _r1n, _k31n);
                        _output3_tmn = vmlaq_f32(_output3_tmn, _r2n, _k32n);
                        _output3_tmn = vmlaq_f32(_output3_tmn, _r3n, _k33n);

                        vst1q_f32(output0_tm, _output0_tmn);
                        vst1q_f32(output1_tm, _output1_tmn);
                        vst1q_f32(output2_tm, _output2_tmn);
                        vst1q_f32(output3_tm, _output3_tmn);

                        output0_tm += 4;
                        output1_tm += 4;
                        output2_tm += 4;
                        output3_tm += 4;

                        r0 += 4;
                        r1 += 4;
                        r2 += 4;
                        r3 += 4;

                        k00 += 4;
                        k10 += 4;
                        k20 += 4;
                        k30 += 4;
                    }
#else // __aarch64__
                    // ARMv7 asm: r4 counts 8 chunks of 8 floats (= 64 coeffs).
                    // Kernel pointers step +256 bytes (next input-channel row)
                    // three times, then -736, netting +32 bytes (8 floats) per
                    // chunk, i.e. +64 floats per tile — undone by the
                    // "-= 64" after the #endif below.
                    asm volatile(
                        "mov r4, #8 \n"
                        "pld [%0, #256] \n"
                        "vld1.f32 {d16-d19}, [%0 :128]\n"//q8 q9 = _output0_tm
                        "0: \n"
                        "pld [%4, #256] \n"
                        "vld1.f32 {d0-d3}, [%4 :128]! \n"//q0 q1 = _r0
                        "pld [%8, #256] \n"
                        "vld1.f32 {d20-d23}, [%8 :128]\n"//q10 q11 = _k00
                        "add %8, %8, #256 \n"
                        "vmla.f32 q8, q0, q10 \n"
                        "vmla.f32 q9, q1, q11 \n"
                        "pld [%1, #256] \n"
                        "vld1.f32 {d24-d27}, [%1 :128]\n"//q12 q13 = _output1_tm
                        "pld [%9, #256] \n"
                        "vld1.f32 {d28-d31}, [%9 :128]\n"//q14 q15 = _k10
                        "add %9, %9, #256 \n"
                        "vmla.f32 q12, q0, q14 \n"
                        "vmla.f32 q13, q1, q15 \n"
                        "pld [%5, #256] \n"
                        "vld1.f32 {d4-d7}, [%5 :128]! \n"//q2 q3 = _r1
                        "pld [%8, #256] \n"
                        "vld1.f32 {d20-d23}, [%8 :128]\n"//q10 q11 = _k01
                        "add %8, %8, #256 \n"
                        "vmla.f32 q8, q2, q10 \n"
                        "vmla.f32 q9, q3, q11 \n"
                        "pld [%9, #256] \n"
                        "vld1.f32 {d28-d31}, [%9 :128]\n"//q14 q15 = _k11
                        "add %9, %9, #256 \n"
                        "vmla.f32 q12, q2, q14 \n"
                        "vmla.f32 q13, q3, q15 \n"
                        "pld [%6, #256] \n"
                        "vld1.f32 {d8-d11}, [%6 :128]!\n"//q4 q5 = _r2
                        "pld [%8, #256] \n"
                        "vld1.f32 {d20-d23}, [%8 :128]\n"//q10 q11 = _k02
                        "add %8, %8, #256 \n"
                        "vmla.f32 q8, q4, q10 \n"
                        "vmla.f32 q9, q5, q11 \n"
                        "pld [%9, #256] \n"
                        "vld1.f32 {d28-d31}, [%9 :128]\n"//q14 q15 = _k12
                        "add %9, %9, #256 \n"
                        "vmla.f32 q12, q4, q14 \n"
                        "vmla.f32 q13, q5, q15 \n"
                        "pld [%7, #256] \n"
                        "vld1.f32 {d12-d15}, [%7 :128]!\n"//q6 q7 = _r3
                        "pld [%8, #256] \n"
                        "vld1.f32 {d20-d23}, [%8 :128]\n"//q10 q11 = _k03
                        "sub %8, %8, #736 \n"
                        "vmla.f32 q8, q6, q10 \n"
                        "vmla.f32 q9, q7, q11 \n"
                        "pld [%9, #256] \n"
                        "vld1.f32 {d28-d31}, [%9 :128]\n"//q14 q15 = _k13
                        "sub %9, %9, #736 \n"
                        "vmla.f32 q12, q6, q14 \n"
                        "vmla.f32 q13, q7, q15 \n"
                        "vst1.f32 {d16-d19}, [%0 :128]!\n"
                        "pld [%2, #256] \n"
                        "vld1.f32 {d16-d19}, [%2 :128]\n"//q8 q9 = _output2_tm
                        "pld [%10, #256] \n"
                        "vld1.f32 {d20-d23}, [%10 :128]\n"//q10 q11 = _k20
                        "add %10, %10, #256 \n"
                        "vmla.f32 q8, q0, q10 \n"
                        "vmla.f32 q9, q1, q11 \n"
                        "vst1.f32 {d24-d27}, [%1 :128]!\n"
                        "pld [%3, #256] \n"
                        "vld1.f32 {d24-d27}, [%3 :128]\n"//q12 q13 = _output3_tm
                        "pld [%11, #256] \n"
                        "vld1.f32 {d28-d31}, [%11 :128]\n"//q14 q15 = _k30
                        "add %11, %11, #256 \n"
                        "vmla.f32 q12, q0, q14 \n"
                        "vmla.f32 q13, q1, q15 \n"
                        "pld [%10, #256] \n"
                        "vld1.f32 {d20-d23}, [%10 :128]\n"//q10 q11 = _k21
                        "add %10, %10, #256 \n"
                        "vmla.f32 q8, q2, q10 \n"
                        "vmla.f32 q9, q3, q11 \n"
                        "pld [%11, #256] \n"
                        "vld1.f32 {d28-d31}, [%11 :128]\n"//q14 q15 = _k31
                        "add %11, %11, #256 \n"
                        "vmla.f32 q12, q2, q14 \n"
                        "vmla.f32 q13, q3, q15 \n"
                        "pld [%10, #256] \n"
                        "vld1.f32 {d20-d23}, [%10 :128]\n"//q10 q11 = _k22
                        "add %10, %10, #256 \n"
                        "vmla.f32 q8, q4, q10 \n"
                        "vmla.f32 q9, q5, q11 \n"
                        "pld [%11, #256] \n"
                        "vld1.f32 {d28-d31}, [%11 :128]\n"//q14 q15 = _k32
                        "add %11, %11, #256 \n"
                        "vmla.f32 q12, q4, q14 \n"
                        "vmla.f32 q13, q5, q15 \n"
                        "pld [%10, #256] \n"
                        "vld1.f32 {d20-d23}, [%10 :128]\n"//q10 q11 = _k23
                        "sub %10, %10, #736 \n"
                        "vmla.f32 q8, q6, q10 \n"
                        "vmla.f32 q9, q7, q11 \n"
                        "pld [%11, #256] \n"
                        "vld1.f32 {d28-d31}, [%11 :128]\n"//q14 q15 = _k33
                        "sub %11, %11, #736 \n"
                        "vmla.f32 q12, q6, q14 \n"
                        "vmla.f32 q13, q7, q15 \n"
                        "vst1.f32 {d16-d19}, [%2 :128]!\n"
                        "pld [%0, #256] \n"
                        "vld1.f32 {d16-d19}, [%0 :128]\n"//q8 q9 = _output0_tm
                        "subs r4, r4, #1 \n"
                        "vst1.f32 {d24-d27}, [%3 :128]!\n"
                        "bne 0b \n"
                        : "=r"(output0_tm), // %0
                        "=r"(output1_tm), // %1
                        "=r"(output2_tm), // %2
                        "=r"(output3_tm), // %3
                        "=r"(r0), // %4
                        "=r"(r1), // %5
                        "=r"(r2), // %6
                        "=r"(r3), // %7
                        "=r"(k00), // %8
                        "=r"(k10), // %9
                        "=r"(k20), // %10
                        "=r"(k30) // %11
                        : "0"(output0_tm),
                        "1"(output1_tm),
                        "2"(output2_tm),
                        "3"(output3_tm),
                        "4"(r0),
                        "5"(r1),
                        "6"(r2),
                        "7"(r3),
                        "8"(k00),
                        "9"(k10),
                        "10"(k20),
                        "11"(k30)
                        : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                    );
#endif // __aarch64__
                    // Rewind kernel pointers to the start of row q for the
                    // next tile (both NEON paths net +64 per tile).
                    k00 -= 64;
                    k10 -= 64;
                    k20 -= 64;
                    k30 -= 64;
#else
                    // Scalar fallback: same accumulation, one coefficient at a
                    // time; "+= 64" hops between the rows of channels q..q+3.
                    for (int m=0; m<64; m++)
                    {
                        output0_tm[m] += r0[m] * k00[m];
                        k00 += 64;
                        output0_tm[m] += r1[m] * k00[m];
                        k00 += 64;
                        output0_tm[m] += r2[m] * k00[m];
                        k00 += 64;
                        output0_tm[m] += r3[m] * k00[m];
                        k00 += 64;
                        k00 -= 64 * 4;

                        output1_tm[m] += r0[m] * k10[m];
                        k10 += 64;
                        output1_tm[m] += r1[m] * k10[m];
                        k10 += 64;
                        output1_tm[m] += r2[m] * k10[m];
                        k10 += 64;
                        output1_tm[m] += r3[m] * k10[m];
                        k10 += 64;
                        k10 -= 64 * 4;

                        output2_tm[m] += r0[m] * k20[m];
                        k20 += 64;
                        output2_tm[m] += r1[m] * k20[m];
                        k20 += 64;
                        output2_tm[m] += r2[m] * k20[m];
                        k20 += 64;
                        output2_tm[m] += r3[m] * k20[m];
                        k20 += 64;
                        k20 -= 64 * 4;

                        output3_tm[m] += r0[m] * k30[m];
                        k30 += 64;
                        output3_tm[m] += r1[m] * k30[m];
                        k30 += 64;
                        output3_tm[m] += r2[m] * k30[m];
                        k30 += 64;
                        output3_tm[m] += r3[m] * k30[m];
                        k30 += 64;
                        k30 -= 64 * 4;
                    }

                    r0 += 64;
                    r1 += 64;
                    r2 += 64;
                    r3 += 64;
                    output0_tm += 64;
                    output1_tm += 64;
                    output2_tm += 64;
                    output3_tm += 64;
#endif // __ARM_NEON
                }
            }

            // Remainder: leftover input channels (inch % 4), scalar only.
            for (; q<inch; q++)
            {
                const float* r0 = bottom_blob_tm.channel(q);

                const float* k0 = kernel0_tm.row(q);
                const float* k1 = kernel1_tm.row(q);
                const float* k2 = kernel2_tm.row(q);
                const float* k3 = kernel3_tm.row(q);

                float* output0_tm = out0_tm;
                float* output1_tm = out1_tm;
                float* output2_tm = out2_tm;
                float* output3_tm = out3_tm;

                // tile
                for (int i=0; i<h_tm/8 * w_tm/8; i++)
                {
                    // TODO neon optimize
                    for (int m=0; m<64; m++)
                    {
                        output0_tm[m] += r0[m] * k0[m];
                        output1_tm[m] += r0[m] * k1[m];
                        output2_tm[m] += r0[m] * k2[m];
                        output3_tm[m] += r0[m] * k3[m];
                    }

                    r0 += 64;
                    output0_tm += 64;
                    output1_tm += 64;
                    output2_tm += 64;
                    output3_tm += 64;
                }
            }
        }

        // Remainder output channels (outch % 4), one at a time.
        #pragma omp parallel for
        for (int p=remain_outch_start; p<outch; p++)
        {
            Mat out0_tm = top_blob_tm.channel(p);
            const Mat kernel0_tm = kernel_tm.channel(p);

            out0_tm.fill(0.f);

            int q = 0;
            for (; q+3<inch; q+=4)
            {
                const float* r0 = bottom_blob_tm.channel(q);
                const float* r1 = bottom_blob_tm.channel(q+1);
                const float* r2 = bottom_blob_tm.channel(q+2);
                const float* r3 = bottom_blob_tm.channel(q+3);

                const float* k0 = kernel0_tm.row(q);
                const float* k1 = kernel0_tm.row(q+1);
                const float* k2 = kernel0_tm.row(q+2);
                const float* k3 = kernel0_tm.row(q+3);

                float* output0_tm = out0_tm;

                // tile
                for (int i=0; i<h_tm/8 * w_tm/8; i++)
                {
#if __ARM_NEON
#if __aarch64__
                    // 8 floats per iteration (two 4-lane halves).
                    for (int m=0; m+7<64; m+=8)
                    {
                        float32x4_t _output0_tm = vld1q_f32(output0_tm);

                        float32x4_t _r0 = vld1q_f32(r0);
                        float32x4_t _r1 = vld1q_f32(r1);
                        float32x4_t _r2 = vld1q_f32(r2);
                        float32x4_t _r3 = vld1q_f32(r3);

                        float32x4_t _k0 = vld1q_f32(k0);
                        float32x4_t _k1 = vld1q_f32(k1);
                        float32x4_t _k2 = vld1q_f32(k2);
                        float32x4_t _k3 = vld1q_f32(k3);

                        _output0_tm = vmlaq_f32(_output0_tm, _r0, _k0);
                        _output0_tm = vmlaq_f32(_output0_tm, _r1, _k1);
                        _output0_tm = vmlaq_f32(_output0_tm, _r2, _k2);
                        _output0_tm = vmlaq_f32(_output0_tm, _r3, _k3);

                        vst1q_f32(output0_tm, _output0_tm);

                        output0_tm += 4;
                        r0 += 4;
                        r1 += 4;
                        r2 += 4;
                        r3 += 4;
                        k0 += 4;
                        k1 += 4;
                        k2 += 4;
                        k3 += 4;

                        float32x4_t _output0_tmn = vld1q_f32(output0_tm);

                        float32x4_t _r0n = vld1q_f32(r0);
                        float32x4_t _r1n = vld1q_f32(r1);
                        float32x4_t _r2n = vld1q_f32(r2);
                        float32x4_t _r3n = vld1q_f32(r3);

                        float32x4_t _k0n = vld1q_f32(k0);
                        float32x4_t _k1n = vld1q_f32(k1);
                        float32x4_t _k2n = vld1q_f32(k2);
                        float32x4_t _k3n = vld1q_f32(k3);

                        _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0n);
                        _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1n);
                        _output0_tmn = vmlaq_f32(_output0_tmn, _r2n, _k2n);
                        _output0_tmn = vmlaq_f32(_output0_tmn, _r3n, _k3n);

                        vst1q_f32(output0_tm, _output0_tmn);

                        output0_tm += 4;
                        r0 += 4;
                        r1 += 4;
                        r2 += 4;
                        r3 += 4;
                        k0 += 4;
                        k1 += 4;
                        k2 += 4;
                        k3 += 4;
                    }
#else
                    // ARMv7 asm: fully unrolled 64-coefficient tile; r4 trails
                    // %0 as the store pointer while loads on %0 run ahead.
                    // All pointers auto-advance by 64 floats; the k pointers
                    // are rewound by the "-= 64" after the #endif below.
                    asm volatile(
                        "pld [%1, #256] \n"
                        "vld1.f32 {d0-d3}, [%1 :128]! \n"
                        "mov r4, %0 \n"
                        "pld [%0, #256] \n"
                        "vld1.f32 {d24-d27}, [%0 :128]!\n"//q12 q13 = output0_tm
                        "pld [%5, #256] \n"
                        "vld1.f32 {d4-d7}, [%5 :128]! \n"
                        "vmla.f32 q12, q0, q2 \n"
                        "pld [%2, #256] \n"
                        "vld1.f32 {d16-d19}, [%2 :128]!\n"
                        "vmla.f32 q13, q1, q3 \n"
                        "pld [%6, #256] \n"
                        "vld1.f32 {d20-d23}, [%6 :128]!\n"
                        "vmla.f32 q12, q8, q10 \n"
                        "pld [%3, #256] \n"
                        "vld1.f32 {d0-d3}, [%3 :128]! \n"
                        "vmla.f32 q13, q9, q11 \n"
                        "pld [%7, #256] \n"
                        "vld1.f32 {d4-d7}, [%7 :128]! \n"
                        "vmla.f32 q12, q0, q2 \n"
                        "pld [%4, #256] \n"
                        "vld1.f32 {d16-d19}, [%4 :128]!\n"
                        "vmla.f32 q13, q1, q3 \n"
                        "pld [%8, #256] \n"
                        "vld1.f32 {d20-d23}, [%8 :128]!\n"
                        "vmla.f32 q12, q8, q10 \n"
                        "pld [%0, #256] \n"
                        "vld1.f32 {d28-d31}, [%0 :128]!\n"//q14 q15 = output0_tm
                        "vmla.f32 q13, q9, q11 \n"
                        "pld [%1, #256] \n"
                        "vld1.f32 {d0-d3}, [%1 :128]! \n"
                        "pld [%5, #256] \n"
                        "vld1.f32 {d4-d7}, [%5 :128]! \n"
                        "vmla.f32 q14, q0, q2 \n"
                        "vst1.f32 {d24-d27}, [r4 :128]!\n"
                        "pld [%2, #256] \n"
                        "vld1.f32 {d16-d19}, [%2 :128]!\n"
                        "vmla.f32 q15, q1, q3 \n"
                        "pld [%6, #256] \n"
                        "vld1.f32 {d20-d23}, [%6 :128]!\n"
                        "vmla.f32 q14, q8, q10 \n"
                        "pld [%3, #256] \n"
                        "vld1.f32 {d0-d3}, [%3 :128]! \n"
                        "vmla.f32 q15, q9, q11 \n"
                        "pld [%7, #256] \n"
                        "vld1.f32 {d4-d7}, [%7 :128]! \n"
                        "vmla.f32 q14, q0, q2 \n"
                        "pld [%4, #256] \n"
                        "vld1.f32 {d16-d19}, [%4 :128]!\n"
                        "vmla.f32 q15, q1, q3 \n"
                        "pld [%8, #256] \n"
                        "vld1.f32 {d20-d23}, [%8 :128]!\n"
                        "vmla.f32 q14, q8, q10 \n"
                        "pld [%0, #256] \n"
                        "vld1.f32 {d24-d27}, [%0 :128]!\n"//q12 q13 = output0_tm
                        "vmla.f32 q15, q9, q11 \n"
                        "pld [%1, #256] \n"
                        "vld1.f32 {d0-d3}, [%1 :128]! \n"
                        "pld [%5, #256] \n"
                        "vld1.f32 {d4-d7}, [%5 :128]! \n"
                        "vmla.f32 q12, q0, q2 \n"
                        "vst1.f32 {d28-d31}, [r4 :128]!\n"
                        "pld [%2, #256] \n"
                        "vld1.f32 {d16-d19}, [%2 :128]!\n"
                        "vmla.f32 q13, q1, q3 \n"
                        "pld [%6, #256] \n"
                        "vld1.f32 {d20-d23}, [%6 :128]!\n"
                        "vmla.f32 q12, q8, q10 \n"
                        "pld [%3, #256] \n"
                        "vld1.f32 {d0-d3}, [%3 :128]! \n"
                        "vmla.f32 q13, q9, q11 \n"
                        "pld [%7, #256] \n"
                        "vld1.f32 {d4-d7}, [%7 :128]! \n"
                        "vmla.f32 q12, q0, q2 \n"
                        "pld [%4, #256] \n"
                        "vld1.f32 {d16-d19}, [%4 :128]!\n"
                        "vmla.f32 q13, q1, q3 \n"
                        "pld [%8, #256] \n"
                        "vld1.f32 {d20-d23}, [%8 :128]!\n"
                        "vmla.f32 q12, q8, q10 \n"
                        "pld [%0, #256] \n"
                        "vld1.f32 {d28-d31}, [%0 :128]!\n"//q14 q15 = output0_tm
                        "vmla.f32 q13, q9, q11 \n"
                        "pld [%1, #256] \n"
                        "vld1.f32 {d0-d3}, [%1 :128]! \n"
                        "pld [%5, #256] \n"
                        "vld1.f32 {d4-d7}, [%5 :128]! \n"
                        "vmla.f32 q14, q0, q2 \n"
                        "vst1.f32 {d24-d27}, [r4 :128]!\n"
                        "pld [%2, #256] \n"
                        "vld1.f32 {d16-d19}, [%2 :128]!\n"
                        "vmla.f32 q15, q1, q3 \n"
                        "pld [%6, #256] \n"
                        "vld1.f32 {d20-d23}, [%6 :128]!\n"
                        "vmla.f32 q14, q8, q10 \n"
                        "pld [%3, #256] \n"
                        "vld1.f32 {d0-d3}, [%3 :128]! \n"
                        "vmla.f32 q15, q9, q11 \n"
                        "pld [%7, #256] \n"
                        "vld1.f32 {d4-d7}, [%7 :128]! \n"
                        "vmla.f32 q14, q0, q2 \n"
                        "pld [%4, #256] \n"
                        "vld1.f32 {d16-d19}, [%4 :128]!\n"
                        "vmla.f32 q15, q1, q3 \n"
                        "pld [%8, #256] \n"
                        "vld1.f32 {d20-d23}, [%8 :128]!\n"
                        "vmla.f32 q14, q8, q10 \n"
                        "pld [%0, #256] \n"
                        "vld1.f32 {d24-d27}, [%0 :128]!\n"//q12 q13 = output0_tm
                        "vmla.f32 q15, q9, q11 \n"
                        "pld [%1, #256] \n"
                        "vld1.f32 {d0-d3}, [%1 :128]! \n"
                        "pld [%5, #256] \n"
                        "vld1.f32 {d4-d7}, [%5 :128]! \n"
                        "vmla.f32 q12, q0, q2 \n"
                        "vst1.f32 {d28-d31}, [r4 :128]!\n"
                        "pld [%2, #256] \n"
                        "vld1.f32 {d16-d19}, [%2 :128]!\n"
                        "vmla.f32 q13, q1, q3 \n"
                        "pld [%6, #256] \n"
                        "vld1.f32 {d20-d23}, [%6 :128]!\n"
                        "vmla.f32 q12, q8, q10 \n"
                        "pld [%3, #256] \n"
                        "vld1.f32 {d0-d3}, [%3 :128]! \n"
                        "vmla.f32 q13, q9, q11 \n"
                        "pld [%7, #256] \n"
                        "vld1.f32 {d4-d7}, [%7 :128]! \n"
                        "vmla.f32 q12, q0, q2 \n"
                        "pld [%4, #256] \n"
                        "vld1.f32 {d16-d19}, [%4 :128]!\n"
                        "vmla.f32 q13, q1, q3 \n"
                        "pld [%8, #256] \n"
                        "vld1.f32 {d20-d23}, [%8 :128]!\n"
                        "vmla.f32 q12, q8, q10 \n"
                        "pld [%0, #256] \n"
                        "vld1.f32 {d28-d31}, [%0 :128]!\n"//q14 q15 = output0_tm
                        "vmla.f32 q13, q9, q11 \n"
                        "pld [%1, #256] \n"
                        "vld1.f32 {d0-d3}, [%1 :128]! \n"
                        "pld [%5, #256] \n"
                        "vld1.f32 {d4-d7}, [%5 :128]! \n"
                        "vmla.f32 q14, q0, q2 \n"
                        "vst1.f32 {d24-d27}, [r4 :128]!\n"
                        "pld [%2, #256] \n"
                        "vld1.f32 {d16-d19}, [%2 :128]!\n"
                        "vmla.f32 q15, q1, q3 \n"
                        "pld [%6, #256] \n"
                        "vld1.f32 {d20-d23}, [%6 :128]!\n"
                        "vmla.f32 q14, q8, q10 \n"
                        "pld [%3, #256] \n"
                        "vld1.f32 {d0-d3}, [%3 :128]! \n"
                        "vmla.f32 q15, q9, q11 \n"
                        "pld [%7, #256] \n"
                        "vld1.f32 {d4-d7}, [%7 :128]! \n"
                        "vmla.f32 q14, q0, q2 \n"
                        "pld [%4, #256] \n"
                        "vld1.f32 {d16-d19}, [%4 :128]!\n"
                        "vmla.f32 q15, q1, q3 \n"
                        "pld [%8, #256] \n"
                        "vld1.f32 {d20-d23}, [%8 :128]!\n"
                        "vmla.f32 q14, q8, q10 \n"
                        "vmla.f32 q15, q9, q11 \n"
                        "vst1.f32 {d28-d31}, [r4 :128]!\n"
                        : "=r"(output0_tm), // %0
                        "=r"(r0), // %1
                        "=r"(r1), // %2
                        "=r"(r2), // %3
                        "=r"(r3), // %4
                        "=r"(k0), // %5
                        "=r"(k1), // %6
                        "=r"(k2), // %7
                        "=r"(k3) // %8
                        : "0"(output0_tm),
                        "1"(r0),
                        "2"(r1),
                        "3"(r2),
                        "4"(r3),
                        "5"(k0),
                        "6"(k1),
                        "7"(k2),
                        "8"(k3)
                        : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                    );
#endif // __aarch64__
                    // Kernel coefficients repeat per tile: rewind to row start.
                    k0 -= 64;
                    k1 -= 64;
                    k2 -= 64;
                    k3 -= 64;
#else
                    // Scalar fallback for the single-output-channel path.
                    for (int m=0; m<64; m++)
                    {
                        output0_tm[m] += r0[m] * k0[m];
                        output0_tm[m] += r1[m] * k1[m];
                        output0_tm[m] += r2[m] * k2[m];
                        output0_tm[m] += r3[m] * k3[m];
                    }

                    r0 += 64;
                    r1 += 64;
                    r2 += 64;
                    r3 += 64;
                    output0_tm += 64;
#endif // __ARM_NEON
                }
            }

            // Remainder input channels (inch % 4), scalar only.
            for (; q<inch; q++)
            {
                const float* r0 = bottom_blob_tm.channel(q);

                const float* k0 = kernel0_tm.row(q);

                float* output0_tm = out0_tm;

                // tile
                for (int i=0; i<h_tm/8 * w_tm/8; i++)
                {
                    // TODO neon optimize
                    for (int m=0; m<64; m++)
                    {
                        output0_tm[m] += r0[m] * k0[m];
                    }

                    r0 += 64;
                    output0_tm += 64;
                }
            }
        }
    }
    bottom_blob_tm = Mat();  // release the transformed input
    // END dot

    // BEGIN transform output
    // Apply A^T * M * A to turn each 8x8 accumulated tile into a 6x6 output
    // tile, adding the per-channel bias in the same pass.
    Mat top_blob_bordered;
    top_blob_bordered.create(outw, outh, outch);
    {
        // const float otm[6][8] = {
        //     {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
        //     {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f},
        //     {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f}
        // };

        // Factored form of the A^T transform above:
        // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32
        // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
        // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
        // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
        // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
        // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)

        int w_tm = outw / 6 * 8;

        #pragma omp parallel for
        for (int p = 0; p<outch; p++)
        {
            const Mat out0_tm = top_blob_tm.channel(p);
            Mat out0 = top_blob_bordered.channel(p);

            const float bias0 = bias ? bias[p] : 0.f;

            float tmp[6][8];

            // tile
            for (int i=0; i<outh/6; i++)
            {
                for (int j=0; j<outw/6; j++)
                {
                    const float* output0_tm = out0_tm.row(i * w_tm/8 + j);
                    float* output0 = out0.row(i * 6) + j * 6;

                    // TODO neon optimize
                    // First pass: A^T applied to the tile's 8 rows -> 6x8.
                    for (int m=0; m<8; m++)
                    {
                        float tmp024a = output0_tm[1] + output0_tm[2];
                        float tmp135a = output0_tm[1] - output0_tm[2];

                        float tmp024b = output0_tm[3] + output0_tm[4];
                        float tmp135b = output0_tm[3] - output0_tm[4];

                        float tmp024c = output0_tm[5] + output0_tm[6];
                        float tmp135c = output0_tm[5] - output0_tm[6];

                        tmp[0][m] = output0_tm[0] + tmp024a + tmp024b + tmp024c * 32;
                        tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8;
                        tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c;

                        tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16;
                        tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4;
                        tmp[5][m] = output0_tm[7] + tmp135a + tmp135b * 32 + tmp135c;

                        output0_tm += 8;
                    }

                    // Second pass: multiply by A on the right and add the
                    // bias, writing the final 6x6 output tile.
                    for (int m=0; m<6; m++)
                    {
                        const float* tmp0 = tmp[m];

                        float tmp024a = tmp0[1] + tmp0[2];
                        float tmp135a = tmp0[1] - tmp0[2];

                        float tmp024b = tmp0[3] + tmp0[4];
                        float tmp135b = tmp0[3] - tmp0[4];

                        float tmp024c = tmp0[5] + tmp0[6];
                        float tmp135c = tmp0[5] - tmp0[6];

                        output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32;
                        output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8;
                        output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c;

                        output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16;
                        output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4;
                        output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c;

                        output0 += outw;
                    }
                }
            }
        }
    }
    // END transform output

    // cut result pad
    // Drop the rows/columns introduced by rounding up to a multiple of 6.
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w);
}
static void conv3x3s1_winograd64_neon2(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 6n+2
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 5) / 6 * 6;
outh = (outh + 5) / 6 * 6;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f);
const float* bias = _bias;
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
bottom_blob_tm.create(2*8, 4 * w_tm/8 * h_tm/8, inch);
const int tiles = w_tm/8 * h_tm/8;
// const float itm[8][8] = {
// {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f},
//
// {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f},
// {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f},
//
// {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f},
// {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f},
//
// {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f},
// {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f},
//
// {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f}
// };
// 0 = r00 - r06 + (r04 - r02) * 5.25
// 7 = r07 - r01 + (r03 - r05) * 5.25
// 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
// 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)
// 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// reuse r04 * 1.25
// reuse r03 * 2.5
// 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
// 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)
#pragma omp parallel for
for (int q = 0; q<inch; q++)
{
const Mat img0 = bottom_blob_bordered.channel(q);
Mat img0_tm = bottom_blob_tm.channel(q);
float tmp[8][8];
// tile
for (int i=0; i<h_tm/8; i++)
{
for (int j=0; j<w_tm/8; j++)
{
const float* r0 = img0.row(i * 6) + j * 6;
float* r0_tm01 = img0_tm.row(i * w_tm/8 + j);
float* r0_tm23 = img0_tm.row(tiles + i * w_tm/8 + j);
float* r0_tm45 = img0_tm.row(tiles * 2 + i * w_tm/8 + j);
float* r0_tm67 = img0_tm.row(tiles * 3 + i * w_tm/8 + j);
for (int m=0; m<8; m++)
{
tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25;
tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25;
float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25);
float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25);
tmp[1][m] = tmp12a + tmp12b;
tmp[2][m] = tmp12a - tmp12b;
float tmp34a = (r0[6] + r0[2] * 0.25 - r0[4] * 1.25);
float tmp34b = (r0[1] * 0.5 - r0[3] * 2.5 + r0[5] * 2);
tmp[3][m] = tmp34a + tmp34b;
tmp[4][m] = tmp34a - tmp34b;
float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25) * 4);
float tmp56b = (r0[1] * 2 - r0[3] * 2.5 + r0[5] * 0.5);
tmp[5][m] = tmp56a + tmp56b;
tmp[6][m] = tmp56a - tmp56b;
r0 += w;
}
float* r0_tms[4] = { r0_tm01, r0_tm23, r0_tm45, r0_tm67 };
for (int m=0; m<8; m++)
{
const float* tmp0 = tmp[m];
float* r0_tm = r0_tms[m/2] + (m%2) * 8;
r0_tm[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25;
r0_tm[7] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25;
float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25);
float tmp12b = (tmp0[1] - tmp0[3] * 4.25 + tmp0[5]);
r0_tm[1] = tmp12a + tmp12b;
r0_tm[2] = tmp12a - tmp12b;
float tmp34a = (tmp0[6] + tmp0[2] * 0.25 - tmp0[4] * 1.25);
float tmp34b = (tmp0[1] * 0.5 - tmp0[3] * 2.5 + tmp0[5] * 2);
r0_tm[3] = tmp34a + tmp34b;
r0_tm[4] = tmp34a - tmp34b;
float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25) * 4);
float tmp56b = (tmp0[1] * 2 - tmp0[3] * 2.5 + tmp0[5] * 0.5);
r0_tm[5] = tmp56a + tmp56b;
r0_tm[6] = tmp56a - tmp56b;
}
}
}
}
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
top_blob_tm.create(2*8, 4 * w_tm/8 * h_tm/8, outch);
const int tiles = h_tm/8 * w_tm/8;
#pragma omp parallel for
for (int p = 0; p<outch; p++)
{
Mat out0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p);
out0_tm.fill(0.f);
int q = 0;
for (; q+1<inch; q+=2)
{
const float* r0 = bottom_blob_tm.channel(q);
const float* r1 = bottom_blob_tm.channel(q+1);
const float* k0 = kernel0_tm.row(q);
const float* k1 = kernel0_tm.row(q+1);
float* output0_tm = out0_tm;
for (int r=0; r<4; r++)
{
#if __ARM_NEON
#if __aarch64__
float32x4_t _k0 = vld1q_f32(k0);
float32x4_t _k0n = vld1q_f32(k0+4);
float32x4_t _k0nn = vld1q_f32(k0+8);
float32x4_t _k0nnn = vld1q_f32(k0+12);
float32x4_t _k1 = vld1q_f32(k1);
float32x4_t _k1n = vld1q_f32(k1+4);
float32x4_t _k1nn = vld1q_f32(k1+8);
float32x4_t _k1nnn = vld1q_f32(k1+12);
#else
float32x4_t _k0;
float32x4_t _k0n;
float32x4_t _k0nn;
float32x4_t _k0nnn;
float32x4_t _k1;
float32x4_t _k1n;
float32x4_t _k1nn;
float32x4_t _k1nnn;
asm volatile(
"pld [%0, #512] \n"
"vld1.f32 {%e2-%f2}, [%0 :128]! \n"
"pld [%1, #512] \n"
"vld1.f32 {%e4-%f4}, [%1 :128]! \n"
"vld1.f32 {%e3-%f3}, [%0 :128]! \n"
"vld1.f32 {%e5-%f5}, [%1 :128]! \n"
"vld1.f32 {%e6-%f6}, [%0 :128]! \n"
"vld1.f32 {%e8-%f8}, [%1 :128]! \n"
"vld1.f32 {%e7-%f7}, [%0 :128]! \n"
"vld1.f32 {%e9-%f9}, [%1 :128]! \n"
: "=r"(k0), // %0
"=r"(k1), // %1
"=w"(_k0), // %2
"=w"(_k0n), // %3
"=w"(_k1), // %4
"=w"(_k1n), // %5
"=w"(_k0nn), // %6
"=w"(_k0nnn), // %7
"=w"(_k1nn), // %8
"=w"(_k1nnn) // %9
: "0"(k0),
"1"(k1)
: "cc", "memory"
);
#endif // __aarch64__
#endif // __ARM_NEON
// tile
#if __ARM_NEON
int nn = tiles >> 2;
int remain = tiles & 3;
#else
int remain = tiles;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
for (; nn>0; nn--)
{
float32x4_t _output0_tm = vld1q_f32(output0_tm);
float32x4_t _output0_tmn = vld1q_f32(output0_tm+4);
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _r0n = vld1q_f32(r0+4);
float32x4_t _r1 = vld1q_f32(r1);
float32x4_t _r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k0);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0n);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k1);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
_r1 = vld1q_f32(r1);
_r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k0nn);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0nnn);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k1nn);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1nnn);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
_r1 = vld1q_f32(r1);
_r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k0);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0n);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k1);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
_r1 = vld1q_f32(r1);
_r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k0nn);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0nnn);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k1nn);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1nnn);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
_r1 = vld1q_f32(r1);
_r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k0);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0n);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k1);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
_r1 = vld1q_f32(r1);
_r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k0nn);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0nnn);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k1nn);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1nnn);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
_r1 = vld1q_f32(r1);
_r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k0);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0n);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k1);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
_r1 = vld1q_f32(r1);
_r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k0nn);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0nnn);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k1nn);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1nnn);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
}
#else
if (nn > 0)
{
asm volatile(
"mov r4, %1 \n"
"pld [%2, #256] \n"
"vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128]! \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q8 \n"
"vmla.f32 q9, q13, %q9 \n"
"pld [%2, #256] \n"
"vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
"0: \n"
"pld [%1, #256] \n"
"vld1.f32 {d20-d23}, [%1 :128]! \n"// q10 q11 = _output0_tm
"vmla.f32 q10, q12, %q12 \n"
"vmla.f32 q11, q13, %q13 \n"
"pld [%3, #256] \n"
"vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1
"vmla.f32 q8, q14, %q10 \n"
"vmla.f32 q9, q15, %q11 \n"
"pld [%3, #256] \n"
"vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1
"pld [%2, #256] \n"
"vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
"vmla.f32 q10, q14, %q14 \n"
"vmla.f32 q11, q15, %q15 \n"
"vst1.f32 {d16-d19}, [r4 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128]! \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q8 \n"
"vmla.f32 q9, q13, %q9 \n"
"pld [%2, #256] \n"
"vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
"vst1.f32 {d20-d23}, [r4 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {d20-d23}, [%1 :128]! \n"// q10 q11 = _output0_tm
"vmla.f32 q10, q12, %q12 \n"
"vmla.f32 q11, q13, %q13 \n"
"pld [%3, #256] \n"
"vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1
"vmla.f32 q8, q14, %q10 \n"
"vmla.f32 q9, q15, %q11 \n"
"pld [%3, #256] \n"
"vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1
"pld [%2, #256] \n"
"vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
"vmla.f32 q10, q14, %q14 \n"
"vmla.f32 q11, q15, %q15 \n"
"vst1.f32 {d16-d19}, [r4 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128]! \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q8 \n"
"vmla.f32 q9, q13, %q9 \n"
"pld [%2, #256] \n"
"vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
"vst1.f32 {d20-d23}, [r4 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {d20-d23}, [%1 :128]! \n"// q10 q11 = _output0_tm
"vmla.f32 q10, q12, %q12 \n"
"vmla.f32 q11, q13, %q13 \n"
"pld [%3, #256] \n"
"vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1
"vmla.f32 q8, q14, %q10 \n"
"vmla.f32 q9, q15, %q11 \n"
"pld [%3, #256] \n"
"vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1
"pld [%2, #256] \n"
"vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
"vmla.f32 q10, q14, %q14 \n"
"vmla.f32 q11, q15, %q15 \n"
"vst1.f32 {d16-d19}, [r4 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128]! \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q8 \n"
"vmla.f32 q9, q13, %q9 \n"
"pld [%2, #256] \n"
"vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
"vst1.f32 {d20-d23}, [r4 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {d20-d23}, [%1 :128]! \n"// q10 q11 = _output0_tm
"vmla.f32 q10, q12, %q12 \n"
"vmla.f32 q11, q13, %q13 \n"
"pld [%3, #256] \n"
"vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1
"vmla.f32 q8, q14, %q10 \n"
"vmla.f32 q9, q15, %q11 \n"
"pld [%3, #256] \n"
"vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1
"pld [%2, #256] \n"
"vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
"vmla.f32 q10, q14, %q14 \n"
"vmla.f32 q11, q15, %q15 \n"
"vst1.f32 {d16-d19}, [r4 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128]! \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q8 \n"
"vmla.f32 q9, q13, %q9 \n"
"pld [%2, #256] \n"
"vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
"subs %0, #1 \n"
"vst1.f32 {d20-d23}, [r4 :128]! \n"
"bne 0b \n"
"sub %1, #32 \n"
"sub %2, #64 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(r1) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(r1),
"w"(_k0), // %8
"w"(_k0n), // %9
"w"(_k1), // %10
"w"(_k1n), // %11
"w"(_k0nn), // %12
"w"(_k0nnn), // %13
"w"(_k1nn), // %14
"w"(_k1nnn) // %15
: "cc", "memory", "r4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
#if __ARM_NEON
#if __aarch64__
float32x4_t _output0_tm = vld1q_f32(output0_tm);
float32x4_t _output0_tmn = vld1q_f32(output0_tm+4);
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _r0n = vld1q_f32(r0+4);
float32x4_t _r1 = vld1q_f32(r1);
float32x4_t _r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k0);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0n);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k1);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
_r1 = vld1q_f32(r1);
_r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k0nn);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0nnn);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k1nn);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1nnn);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
#else
asm volatile(
"mov r4, %0 \n"
"pld [%1, #256] \n"
"vld1.f32 {d24-d27}, [%1 :128]! \n"// q12 q13 = _r0
"pld [%0, #256] \n"
"vld1.f32 {d16-d19}, [%0 :128]! \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q6 \n"
"pld [%2, #256] \n"
"vld1.f32 {d28-d31}, [%2 :128]! \n"// q14 q15 = _r1
"vmla.f32 q9, q13, %q7 \n"
"pld [%1, #256] \n"
"vld1.f32 {d24-d27}, [%1 :128]! \n"// q12 q13 = _r0
"vmla.f32 q8, q14, %q8 \n"
"pld [%0, #256] \n"
"vld1.f32 {d20-d23}, [%0 :128] \n"// q10 q11 = _output0_tm
"vmla.f32 q9, q15, %q9 \n"
"vmla.f32 q10, q12, %q10 \n"
"vmla.f32 q11, q13, %q11 \n"
"vst1.f32 {d16-d19}, [r4 :128] \n"
"pld [%2, #256] \n"
"vld1.f32 {d28-d31}, [%2 :128]! \n"// q14 q15 = _r1
"vmla.f32 q10, q14, %q12 \n"
"vmla.f32 q11, q15, %q13 \n"
"vst1.f32 {d20-d23}, [%0 :128]! \n"
: "=r"(output0_tm), // %0
"=r"(r0), // %1
"=r"(r1) // %2
: "0"(output0_tm),
"1"(r0),
"2"(r1),
"w"(_k0), // %6
"w"(_k0n), // %7
"w"(_k1), // %8
"w"(_k1n), // %9
"w"(_k0nn), // %10
"w"(_k0nnn), // %11
"w"(_k1nn), // %12
"w"(_k1nnn) // %13
: "cc", "memory", "r4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
#else
for (int m=0; m<16; m++)
{
output0_tm[m] += r0[m] * k0[m];
output0_tm[m] += r1[m] * k1[m];
}
r0 += 16;
r1 += 16;
output0_tm += 16;
#endif // __ARM_NEON
}
#if __ARM_NEON
#if __aarch64__
k0 += 16;
k1 += 16;
#endif // __aarch64__
#else
k0 += 16;
k1 += 16;
#endif // __ARM_NEON
}
}
for (; q<inch; q++)
{
const float* r0 = bottom_blob_tm.channel(q);
const float* k0 = kernel0_tm.row(q);
float* output0_tm = out0_tm;
for (int r=0; r<4; r++)
{
#if __ARM_NEON
#if __aarch64__
float32x4_t _k0 = vld1q_f32(k0);
float32x4_t _k0n = vld1q_f32(k0+4);
float32x4_t _k0nn = vld1q_f32(k0+8);
float32x4_t _k0nnn = vld1q_f32(k0+12);
#else
float32x4_t _k0;
float32x4_t _k0n;
float32x4_t _k0nn;
float32x4_t _k0nnn;
asm volatile(
"pld [%0, #512] \n"
"vld1.f32 {%e1-%f1}, [%0 :128]! \n"
"vld1.f32 {%e2-%f2}, [%0 :128]! \n"
"vld1.f32 {%e3-%f3}, [%0 :128]! \n"
"vld1.f32 {%e4-%f4}, [%0 :128]! \n"
: "=r"(k0), // %0
"=w"(_k0), // %1
"=w"(_k0n), // %2
"=w"(_k0nn), // %3
"=w"(_k0nnn) // %4
: "0"(k0)
: "cc", "memory"
);
#endif // __aarch64__
#endif // __ARM_NEON
// tile
for (int i=0; i<tiles; i++)
{
#if __ARM_NEON
#if __aarch64__
float32x4_t _output0_tm = vld1q_f32(output0_tm);
float32x4_t _output0_tmn = vld1q_f32(output0_tm+4);
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _r0n = vld1q_f32(r0+4);
r0 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k0);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
r0 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k0nn);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0nnn);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
#else
asm volatile(
"mov r4, %0 \n"
"pld [%1, #256] \n"
"vld1.f32 {d24-d27}, [%1 :128]! \n"// q12 q13 = _r0
"pld [%0, #256] \n"
"vld1.f32 {d16-d19}, [%0 :128]! \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q4 \n"
"vmla.f32 q9, q13, %q5 \n"
"pld [%1, #256] \n"
"vld1.f32 {d24-d27}, [%1 :128]! \n"// q12 q13 = _r0
"pld [%0, #256] \n"
"vld1.f32 {d20-d23}, [%0 :128] \n"// q10 q11 = _output0_tm
"vmla.f32 q10, q12, %q6 \n"
"vst1.f32 {d16-d19}, [r4 :128] \n"
"vmla.f32 q11, q13, %q7 \n"
"vst1.f32 {d20-d23}, [%0 :128]! \n"
: "=r"(output0_tm), // %0
"=r"(r0) // %1
: "0"(output0_tm),
"1"(r0),
"w"(_k0), // %4
"w"(_k0n), // %5
"w"(_k0nn), // %6
"w"(_k0nnn) // %7
: "cc", "memory", "r4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
#else
for (int m=0; m<16; m++)
{
output0_tm[m] += r0[m] * k0[m];
}
r0 += 16;
output0_tm += 16;
#endif // __ARM_NEON
}
#if __ARM_NEON
#if __aarch64__
k0 += 16;
#endif // __aarch64__
#else
k0 += 16;
#endif // __ARM_NEON
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
top_blob_bordered.create(outw, outh, outch);
{
// const float otm[6][8] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f}
// };
// 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32
// 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
// 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
// 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
// 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
// 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = w_tm/8 * h_tm/8;
#pragma omp parallel for
for (int p = 0; p<outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob_bordered.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
float tmp[6][8];
// tile
for (int i=0; i<outh/6; i++)
{
for (int j=0; j<outw/6; j++)
{
const float* output0_tm01 = out0_tm.row(i * w_tm/8 + j);
const float* output0_tm23 = out0_tm.row(tiles + i * w_tm/8 + j);
const float* output0_tm45 = out0_tm.row(tiles * 2 + i * w_tm/8 + j);
const float* output0_tm67 = out0_tm.row(tiles * 3 + i * w_tm/8 + j);
float* output0 = out0.row(i * 6) + j * 6;
const float* output0_tms[4] = { output0_tm01, output0_tm23, output0_tm45, output0_tm67 };
for (int m=0; m<8; m++)
{
const float* output0_tm = output0_tms[m/2] + (m%2) * 8;
float tmp024a = output0_tm[1] + output0_tm[2];
float tmp135a = output0_tm[1] - output0_tm[2];
float tmp024b = output0_tm[3] + output0_tm[4];
float tmp135b = output0_tm[3] - output0_tm[4];
float tmp024c = output0_tm[5] + output0_tm[6];
float tmp135c = output0_tm[5] - output0_tm[6];
tmp[0][m] = output0_tm[0] + tmp024a + tmp024b + tmp024c * 32;
tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8;
tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c;
tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16;
tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4;
tmp[5][m] = output0_tm[7] + tmp135a + tmp135b * 32 + tmp135c;
}
for (int m=0; m<6; m++)
{
const float* tmp0 = tmp[m];
float tmp024a = tmp0[1] + tmp0[2];
float tmp135a = tmp0[1] - tmp0[2];
float tmp024b = tmp0[3] + tmp0[4];
float tmp135b = tmp0[3] - tmp0[4];
float tmp024c = tmp0[5] + tmp0[6];
float tmp135c = tmp0[5] - tmp0[6];
output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32;
output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8;
output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c;
output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16;
output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4;
output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c;
output0 += outw;
}
}
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w);
}
static void conv3x3s1_winograd64_neon3(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 6n+2
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 5) / 6 * 6;
outh = (outh + 5) / 6 * 6;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f);
const float* bias = _bias;
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
bottom_blob_tm.create(8, 8 * w_tm/8 * h_tm/8, inch);
const int tiles = w_tm/8 * h_tm/8;
// const float itm[8][8] = {
// {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f},
//
// {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f},
// {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f},
//
// {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f},
// {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f},
//
// {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f},
// {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f},
//
// {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f}
// };
// 0 = r00 - r06 + (r04 - r02) * 5.25
// 7 = r07 - r01 + (r03 - r05) * 5.25
// 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
// 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)
// 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// reuse r04 * 1.25
// reuse r03 * 2.5
// 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
// 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)
#pragma omp parallel for
for (int q = 0; q<inch; q++)
{
const Mat img0 = bottom_blob_bordered.channel(q);
Mat img0_tm = bottom_blob_tm.channel(q);
float tmp[8][8];
// tile
for (int i=0; i<h_tm/8; i++)
{
for (int j=0; j<w_tm/8; j++)
{
const float* r0 = img0.row(i * 6) + j * 6;
float* r0_tm0 = img0_tm.row(i * w_tm/8 + j);
float* r0_tm1 = img0_tm.row(i * w_tm/8 + j + tiles);
float* r0_tm2 = img0_tm.row(i * w_tm/8 + j + tiles * 2);
float* r0_tm3 = img0_tm.row(i * w_tm/8 + j + tiles * 3);
float* r0_tm4 = img0_tm.row(i * w_tm/8 + j + tiles * 4);
float* r0_tm5 = img0_tm.row(i * w_tm/8 + j + tiles * 5);
float* r0_tm6 = img0_tm.row(i * w_tm/8 + j + tiles * 6);
float* r0_tm7 = img0_tm.row(i * w_tm/8 + j + tiles * 7);
for (int m=0; m<8; m++)
{
tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25;
tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25;
float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25);
float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25);
tmp[1][m] = tmp12a + tmp12b;
tmp[2][m] = tmp12a - tmp12b;
float tmp34a = (r0[6] + r0[2] * 0.25 - r0[4] * 1.25);
float tmp34b = (r0[1] * 0.5 - r0[3] * 2.5 + r0[5] * 2);
tmp[3][m] = tmp34a + tmp34b;
tmp[4][m] = tmp34a - tmp34b;
float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25) * 4);
float tmp56b = (r0[1] * 2 - r0[3] * 2.5 + r0[5] * 0.5);
tmp[5][m] = tmp56a + tmp56b;
tmp[6][m] = tmp56a - tmp56b;
r0 += w;
}
float* r0_tms[8] = { r0_tm0, r0_tm1, r0_tm2, r0_tm3, r0_tm4, r0_tm5, r0_tm6, r0_tm7 };
for (int m=0; m<8; m++)
{
const float* tmp0 = tmp[m];
float* r0_tm = r0_tms[m];
r0_tm[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25;
r0_tm[7] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25;
float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25);
float tmp12b = (tmp0[1] - tmp0[3] * 4.25 + tmp0[5]);
r0_tm[1] = tmp12a + tmp12b;
r0_tm[2] = tmp12a - tmp12b;
float tmp34a = (tmp0[6] + tmp0[2] * 0.25 - tmp0[4] * 1.25);
float tmp34b = (tmp0[1] * 0.5 - tmp0[3] * 2.5 + tmp0[5] * 2);
r0_tm[3] = tmp34a + tmp34b;
r0_tm[4] = tmp34a - tmp34b;
float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25) * 4);
float tmp56b = (tmp0[1] * 2 - tmp0[3] * 2.5 + tmp0[5] * 0.5);
r0_tm[5] = tmp56a + tmp56b;
r0_tm[6] = tmp56a - tmp56b;
}
}
}
}
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
top_blob_tm.create(8, 8 * w_tm/8 * h_tm/8, outch);
const int tiles = h_tm/8 * w_tm/8;
int nn_outch = outch >> 1;
int remain_outch_start = nn_outch << 1;
#pragma omp parallel for
for (int pp=0; pp<nn_outch; pp++)
{
int p = pp * 2;
Mat out0_tm = top_blob_tm.channel(p);
Mat out1_tm = top_blob_tm.channel(p+1);
const Mat kernel0_tm = kernel_tm.channel(p);
const Mat kernel1_tm = kernel_tm.channel(p+1);
out0_tm.fill(0.f);
out1_tm.fill(0.f);
int q = 0;
for (; q+1<inch; q+=2)
{
const float* r0 = bottom_blob_tm.channel(q);
const float* r1 = bottom_blob_tm.channel(q+1);
const float* k00 = kernel0_tm.row(q);
const float* k01 = kernel0_tm.row(q+1);
const float* k10 = kernel1_tm.row(q);
const float* k11 = kernel1_tm.row(q+1);
float* output0_tm = out0_tm;
float* output1_tm = out1_tm;
for (int r=0; r<8; r++)
{
#if __ARM_NEON
#if __aarch64__
float32x4_t _k00 = vld1q_f32(k00);
float32x4_t _k00n = vld1q_f32(k00+4);
float32x4_t _k01 = vld1q_f32(k01);
float32x4_t _k01n = vld1q_f32(k01+4);
float32x4_t _k10 = vld1q_f32(k10);
float32x4_t _k10n = vld1q_f32(k10+4);
float32x4_t _k11 = vld1q_f32(k11);
float32x4_t _k11n = vld1q_f32(k11+4);
#else
float32x4_t _k00;
float32x4_t _k00n;
float32x4_t _k01;
float32x4_t _k01n;
float32x4_t _k10;
float32x4_t _k10n;
float32x4_t _k11;
float32x4_t _k11n;
asm volatile(
"pld [%0, #256] \n"
"vld1.f32 {%e4-%f4}, [%0 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {%e6-%f6}, [%1 :128]! \n"
"pld [%2, #256] \n"
"vld1.f32 {%e8-%f8}, [%2 :128]! \n"
"pld [%3, #256] \n"
"vld1.f32 {%e10-%f10}, [%3 :128]! \n"
"vld1.f32 {%e5-%f5}, [%0 :128]! \n"
"vld1.f32 {%e7-%f7}, [%1 :128]! \n"
"vld1.f32 {%e9-%f9}, [%2 :128]! \n"
"vld1.f32 {%e11-%f11}, [%3 :128]! \n"
: "=r"(k00), // %0
"=r"(k01), // %1
"=r"(k10), // %2
"=r"(k11), // %3
"=w"(_k00), // %4
"=w"(_k00n), // %5
"=w"(_k01), // %6
"=w"(_k01n), // %7
"=w"(_k10), // %8
"=w"(_k10n), // %9
"=w"(_k11), // %10
"=w"(_k11n) // %11
: "0"(k00),
"1"(k01),
"2"(k10),
"3"(k11)
: "cc", "memory"
);
#endif // __aarch64__
#endif // __ARM_NEON
// tile
#if __ARM_NEON
int nn = tiles >> 2;
int remain = tiles & 3;
#else
int remain = tiles;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
for (; nn>0; nn--)
{
float32x4_t _output0_tm = vld1q_f32(output0_tm);
float32x4_t _output0_tmn = vld1q_f32(output0_tm+4);
float32x4_t _output1_tm = vld1q_f32(output1_tm);
float32x4_t _output1_tmn = vld1q_f32(output1_tm+4);
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _r0n = vld1q_f32(r0+4);
float32x4_t _r1 = vld1q_f32(r1);
float32x4_t _r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k01);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n);
_output1_tm = vmlaq_f32(_output1_tm, _r0, _k10);
_output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n);
_output1_tm = vmlaq_f32(_output1_tm, _r1, _k11);
_output1_tmn = vmlaq_f32(_output1_tmn, _r1n, _k11n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
vst1q_f32(output1_tm, _output1_tm);
vst1q_f32(output1_tm+4, _output1_tmn);
output0_tm += 8;
output1_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_output1_tm = vld1q_f32(output1_tm);
_output1_tmn = vld1q_f32(output1_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
_r1 = vld1q_f32(r1);
_r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k01);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n);
_output1_tm = vmlaq_f32(_output1_tm, _r0, _k10);
_output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n);
_output1_tm = vmlaq_f32(_output1_tm, _r1, _k11);
_output1_tmn = vmlaq_f32(_output1_tmn, _r1n, _k11n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
vst1q_f32(output1_tm, _output1_tm);
vst1q_f32(output1_tm+4, _output1_tmn);
output0_tm += 8;
output1_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_output1_tm = vld1q_f32(output1_tm);
_output1_tmn = vld1q_f32(output1_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
_r1 = vld1q_f32(r1);
_r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k01);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n);
_output1_tm = vmlaq_f32(_output1_tm, _r0, _k10);
_output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n);
_output1_tm = vmlaq_f32(_output1_tm, _r1, _k11);
_output1_tmn = vmlaq_f32(_output1_tmn, _r1n, _k11n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
vst1q_f32(output1_tm, _output1_tm);
vst1q_f32(output1_tm+4, _output1_tmn);
output0_tm += 8;
output1_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_output1_tm = vld1q_f32(output1_tm);
_output1_tmn = vld1q_f32(output1_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
_r1 = vld1q_f32(r1);
_r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k01);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n);
_output1_tm = vmlaq_f32(_output1_tm, _r0, _k10);
_output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n);
_output1_tm = vmlaq_f32(_output1_tm, _r1, _k11);
_output1_tmn = vmlaq_f32(_output1_tmn, _r1n, _k11n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
vst1q_f32(output1_tm, _output1_tm);
vst1q_f32(output1_tm+4, _output1_tmn);
output0_tm += 8;
output1_tm += 8;
}
#else
if (nn > 0)
{
asm volatile(
"0: \n"
"pld [%3, #256] \n"
"vld1.f32 {d24-d27}, [%3 :128]! \n"// q12 q13 = _r0
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q10 \n"
"vmla.f32 q9, q13, %q11 \n"
"pld [%4, #256] \n"
"vld1.f32 {d28-d31}, [%4 :128]! \n"// q14 q15 = _r1
"vmla.f32 q8, q14, %q12 \n"
"vmla.f32 q9, q15, %q13 \n"
"pld [%2, #256] \n"
"vld1.f32 {d20-d23}, [%2 :128] \n"// q10 q11 = _output1_tm
"vmla.f32 q10, q12, %q14 \n"
"vmla.f32 q11, q13, %q15 \n"
"pld [%3, #256] \n"
"vld1.f32 {d24-d27}, [%3 :128]! \n"// q12 q13 = _r0
"vmla.f32 q10, q14, %q16 \n"
"vmla.f32 q11, q15, %q17 \n"
"vst1.f32 {d16-d19}, [%1 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q10 \n"
"vmla.f32 q9, q13, %q11 \n"
"pld [%4, #256] \n"
"vld1.f32 {d28-d31}, [%4 :128]! \n"// q14 q15 = _r1
"vmla.f32 q8, q14, %q12 \n"
"vmla.f32 q9, q15, %q13 \n"
"vst1.f32 {d20-d23}, [%2 :128]! \n"
"pld [%2, #256] \n"
"vld1.f32 {d20-d23}, [%2 :128] \n"// q10 q11 = _output1_tm
"vmla.f32 q10, q12, %q14 \n"
"vmla.f32 q11, q13, %q15 \n"
"pld [%3, #256] \n"
"vld1.f32 {d24-d27}, [%3 :128]! \n"// q12 q13 = _r0
"vmla.f32 q10, q14, %q16 \n"
"vmla.f32 q11, q15, %q17 \n"
"vst1.f32 {d16-d19}, [%1 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q10 \n"
"vmla.f32 q9, q13, %q11 \n"
"pld [%4, #256] \n"
"vld1.f32 {d28-d31}, [%4 :128]! \n"// q14 q15 = _r1
"vmla.f32 q8, q14, %q12 \n"
"vmla.f32 q9, q15, %q13 \n"
"vst1.f32 {d20-d23}, [%2 :128]! \n"
"pld [%2, #256] \n"
"vld1.f32 {d20-d23}, [%2 :128] \n"// q10 q11 = _output1_tm
"vmla.f32 q10, q12, %q14 \n"
"vmla.f32 q11, q13, %q15 \n"
"pld [%3, #256] \n"
"vld1.f32 {d24-d27}, [%3 :128]! \n"// q12 q13 = _r0
"vmla.f32 q10, q14, %q16 \n"
"vmla.f32 q11, q15, %q17 \n"
"vst1.f32 {d16-d19}, [%1 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q10 \n"
"vmla.f32 q9, q13, %q11 \n"
"pld [%4, #256] \n"
"vld1.f32 {d28-d31}, [%4 :128]! \n"// q14 q15 = _r1
"vmla.f32 q8, q14, %q12 \n"
"vmla.f32 q9, q15, %q13 \n"
"vst1.f32 {d20-d23}, [%2 :128]! \n"
"pld [%2, #256] \n"
"vld1.f32 {d20-d23}, [%2 :128] \n"// q10 q11 = _output1_tm
"vmla.f32 q10, q12, %q14 \n"
"vmla.f32 q11, q13, %q15 \n"
"vmla.f32 q10, q14, %q16 \n"
"vmla.f32 q11, q15, %q17 \n"
"vst1.f32 {d16-d19}, [%1 :128]! \n"
"vst1.f32 {d20-d23}, [%2 :128]! \n"
"subs %0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(r0), // %3
"=r"(r1) // %4
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(r0),
"4"(r1),
"w"(_k00), // %10
"w"(_k00n), // %11
"w"(_k01), // %12
"w"(_k01n), // %13
"w"(_k10), // %14
"w"(_k10n), // %15
"w"(_k11), // %16
"w"(_k11n) // %17
: "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
#if __ARM_NEON
#if __aarch64__
float32x4_t _output0_tm = vld1q_f32(output0_tm);
float32x4_t _output0_tmn = vld1q_f32(output0_tm+4);
float32x4_t _output1_tm = vld1q_f32(output1_tm);
float32x4_t _output1_tmn = vld1q_f32(output1_tm+4);
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _r0n = vld1q_f32(r0+4);
float32x4_t _r1 = vld1q_f32(r1);
float32x4_t _r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k01);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n);
_output1_tm = vmlaq_f32(_output1_tm, _r0, _k10);
_output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n);
_output1_tm = vmlaq_f32(_output1_tm, _r1, _k11);
_output1_tmn = vmlaq_f32(_output1_tmn, _r1n, _k11n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
vst1q_f32(output1_tm, _output1_tm);
vst1q_f32(output1_tm+4, _output1_tmn);
output0_tm += 8;
output1_tm += 8;
#else
asm volatile(
"pld [%2, #256] \n"
"vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
"pld [%0, #256] \n"
"vld1.f32 {d16-d19}, [%0 :128] \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q8 \n"
"vmla.f32 q9, q13, %q9 \n"
"pld [%3, #256] \n"
"vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1
"vmla.f32 q8, q14, %q10 \n"
"vmla.f32 q9, q15, %q11 \n"
"pld [%1, #256] \n"
"vld1.f32 {d20-d23}, [%1 :128] \n"// q10 q11 = _output1_tm
"vmla.f32 q10, q12, %q12 \n"
"vmla.f32 q11, q13, %q13 \n"
"vmla.f32 q10, q14, %q14 \n"
"vmla.f32 q11, q15, %q15 \n"
"vst1.f32 {d16-d19}, [%0 :128]! \n"
"vst1.f32 {d20-d23}, [%1 :128]! \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(r0), // %2
"=r"(r1) // %3
: "0"(output0_tm),
"1"(output1_tm),
"2"(r0),
"3"(r1),
"w"(_k00), // %8
"w"(_k00n), // %9
"w"(_k01), // %10
"w"(_k01n), // %11
"w"(_k10), // %12
"w"(_k10n), // %13
"w"(_k11), // %14
"w"(_k11n) // %15
: "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
#else
for (int m=0; m<8; m++)
{
output0_tm[m] += r0[m] * k00[m];
output0_tm[m] += r1[m] * k01[m];
output1_tm[m] += r0[m] * k10[m];
output1_tm[m] += r1[m] * k11[m];
}
r0 += 8;
r1 += 8;
output0_tm += 8;
output1_tm += 8;
#endif // __ARM_NEON
}
#if __ARM_NEON
#if __aarch64__
k00 += 8;
k01 += 8;
k10 += 8;
k11 += 8;
#endif // __aarch64__
#else
k00 += 8;
k01 += 8;
k10 += 8;
k11 += 8;
#endif // __ARM_NEON
}
}
for (; q<inch; q++)
{
const float* r0 = bottom_blob_tm.channel(q);
const float* k00 = kernel0_tm.row(q);
const float* k10 = kernel1_tm.row(q);
float* output0_tm = out0_tm;
float* output1_tm = out1_tm;
for (int r=0; r<8; r++)
{
#if __ARM_NEON
#if __aarch64__
float32x4_t _k00 = vld1q_f32(k00);
float32x4_t _k00n = vld1q_f32(k00+4);
float32x4_t _k10 = vld1q_f32(k10);
float32x4_t _k10n = vld1q_f32(k10+4);
#else
float32x4_t _k00;
float32x4_t _k00n;
float32x4_t _k10;
float32x4_t _k10n;
asm volatile(
"pld [%0, #256] \n"
"vld1.f32 {%e2-%f2}, [%0 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {%e4-%f4}, [%1 :128]! \n"
"vld1.f32 {%e3-%f3}, [%0 :128]! \n"
"vld1.f32 {%e5-%f5}, [%1 :128]! \n"
: "=r"(k00), // %0
"=r"(k10), // %1
"=w"(_k00), // %2
"=w"(_k00n), // %3
"=w"(_k10), // %4
"=w"(_k10n) // %5
: "0"(k00),
"1"(k10)
: "cc", "memory"
);
#endif // __aarch64__
#endif // __ARM_NEON
// tile
#if __ARM_NEON
int nn = tiles >> 2;
int remain = tiles & 3;
#else
int remain = tiles;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
for (; nn>0; nn--)
{
float32x4_t _output0_tm = vld1q_f32(output0_tm);
float32x4_t _output0_tmn = vld1q_f32(output0_tm+4);
float32x4_t _output1_tm = vld1q_f32(output1_tm);
float32x4_t _output1_tmn = vld1q_f32(output1_tm+4);
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _r0n = vld1q_f32(r0+4);
r0 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n);
_output1_tm = vmlaq_f32(_output1_tm, _r0, _k10);
_output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
vst1q_f32(output1_tm, _output1_tm);
vst1q_f32(output1_tm+4, _output1_tmn);
output0_tm += 8;
output1_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_output1_tm = vld1q_f32(output1_tm);
_output1_tmn = vld1q_f32(output1_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
r0 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n);
_output1_tm = vmlaq_f32(_output1_tm, _r0, _k10);
_output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
vst1q_f32(output1_tm, _output1_tm);
vst1q_f32(output1_tm+4, _output1_tmn);
output0_tm += 8;
output1_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_output1_tm = vld1q_f32(output1_tm);
_output1_tmn = vld1q_f32(output1_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
r0 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n);
_output1_tm = vmlaq_f32(_output1_tm, _r0, _k10);
_output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
vst1q_f32(output1_tm, _output1_tm);
vst1q_f32(output1_tm+4, _output1_tmn);
output0_tm += 8;
output1_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_output1_tm = vld1q_f32(output1_tm);
_output1_tmn = vld1q_f32(output1_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
r0 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n);
_output1_tm = vmlaq_f32(_output1_tm, _r0, _k10);
_output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
vst1q_f32(output1_tm, _output1_tm);
vst1q_f32(output1_tm+4, _output1_tmn);
output0_tm += 8;
output1_tm += 8;
}
#else
if (nn > 0)
{
asm volatile(
"0: \n"
"pld [%3, #256] \n"
"vld1.f32 {d24-d27}, [%3 :128]! \n"// q12 q13 = _r0
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q8 \n"
"vmla.f32 q9, q13, %q9 \n"
"pld [%2, #256] \n"
"vld1.f32 {d20-d23}, [%2 :128] \n"// q10 q11 = _output1_tm
"vmla.f32 q10, q12, %q10 \n"
"vmla.f32 q11, q13, %q11 \n"
"pld [%3, #256] \n"
"vld1.f32 {d24-d27}, [%3 :128]! \n"// q12 q13 = _r0
"vst1.f32 {d16-d19}, [%1 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q8 \n"
"vmla.f32 q9, q13, %q9 \n"
"vst1.f32 {d20-d23}, [%2 :128]! \n"
"pld [%2, #256] \n"
"vld1.f32 {d20-d23}, [%2 :128] \n"// q10 q11 = _output1_tm
"vmla.f32 q10, q12, %q10 \n"
"vmla.f32 q11, q13, %q11 \n"
"pld [%3, #256] \n"
"vld1.f32 {d24-d27}, [%3 :128]! \n"// q12 q13 = _r0
"vst1.f32 {d16-d19}, [%1 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q8 \n"
"vmla.f32 q9, q13, %q9 \n"
"vst1.f32 {d20-d23}, [%2 :128]! \n"
"pld [%2, #256] \n"
"vld1.f32 {d20-d23}, [%2 :128] \n"// q10 q11 = _output1_tm
"vmla.f32 q10, q12, %q10 \n"
"vmla.f32 q11, q13, %q11 \n"
"pld [%3, #256] \n"
"vld1.f32 {d24-d27}, [%3 :128]! \n"// q12 q13 = _r0
"vst1.f32 {d16-d19}, [%1 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q8 \n"
"vmla.f32 q9, q13, %q9 \n"
"vst1.f32 {d20-d23}, [%2 :128]! \n"
"pld [%2, #256] \n"
"vld1.f32 {d20-d23}, [%2 :128] \n"// q10 q11 = _output1_tm
"vmla.f32 q10, q12, %q10 \n"
"vmla.f32 q11, q13, %q11 \n"
"vst1.f32 {d16-d19}, [%1 :128]! \n"
"vst1.f32 {d20-d23}, [%2 :128]! \n"
"subs %0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(r0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(r0),
"w"(_k00), // %8
"w"(_k00n), // %9
"w"(_k10), // %10
"w"(_k10n) // %11
: "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
#if __ARM_NEON
#if __aarch64__
float32x4_t _output0_tm = vld1q_f32(output0_tm);
float32x4_t _output0_tmn = vld1q_f32(output0_tm+4);
float32x4_t _output1_tm = vld1q_f32(output1_tm);
float32x4_t _output1_tmn = vld1q_f32(output1_tm+4);
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _r0n = vld1q_f32(r0+4);
r0 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n);
_output1_tm = vmlaq_f32(_output1_tm, _r0, _k10);
_output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
vst1q_f32(output1_tm, _output1_tm);
vst1q_f32(output1_tm+4, _output1_tmn);
output0_tm += 8;
output1_tm += 8;
#else
asm volatile(
"pld [%2, #256] \n"
"vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
"pld [%0, #256] \n"
"vld1.f32 {d16-d19}, [%0 :128] \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q6 \n"
"vmla.f32 q9, q13, %q7 \n"
"pld [%1, #256] \n"
"vld1.f32 {d20-d23}, [%1 :128] \n"// q10 q11 = _output1_tm
"vmla.f32 q10, q12, %q8 \n"
"vmla.f32 q11, q13, %q9 \n"
"vst1.f32 {d16-d19}, [%0 :128]! \n"
"vst1.f32 {d20-d23}, [%1 :128]! \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(r0) // %2
: "0"(output0_tm),
"1"(output1_tm),
"2"(r0),
"w"(_k00), // %6
"w"(_k00n), // %7
"w"(_k10), // %8
"w"(_k10n) // %9
: "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13"
);
#endif // __aarch64__
#else
for (int m=0; m<8; m++)
{
output0_tm[m] += r0[m] * k00[m];
output1_tm[m] += r0[m] * k10[m];
}
r0 += 8;
output0_tm += 8;
output1_tm += 8;
#endif // __ARM_NEON
}
#if __ARM_NEON
#if __aarch64__
k00 += 8;
k10 += 8;
#endif // __aarch64__
#else
k00 += 8;
k10 += 8;
#endif // __ARM_NEON
}
}
}
#pragma omp parallel for
for (int p = remain_outch_start; p<outch; p++)
{
Mat out0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p);
out0_tm.fill(0.f);
int q = 0;
for (; q+1<inch; q+=2)
{
const float* r0 = bottom_blob_tm.channel(q);
const float* r1 = bottom_blob_tm.channel(q+1);
const float* k00 = kernel0_tm.row(q);
const float* k01 = kernel0_tm.row(q+1);
float* output0_tm = out0_tm;
for (int r=0; r<8; r++)
{
#if __ARM_NEON
#if __aarch64__
float32x4_t _k00 = vld1q_f32(k00);
float32x4_t _k00n = vld1q_f32(k00+4);
float32x4_t _k01 = vld1q_f32(k01);
float32x4_t _k01n = vld1q_f32(k01+4);
#else
float32x4_t _k00;
float32x4_t _k00n;
float32x4_t _k01;
float32x4_t _k01n;
asm volatile(
"pld [%0, #256] \n"
"vld1.f32 {%e2-%f2}, [%0 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {%e4-%f4}, [%1 :128]! \n"
"vld1.f32 {%e3-%f3}, [%0 :128]! \n"
"vld1.f32 {%e5-%f5}, [%1 :128]! \n"
: "=r"(k00), // %0
"=r"(k01), // %1
"=w"(_k00), // %2
"=w"(_k00n), // %3
"=w"(_k01), // %4
"=w"(_k01n) // %5
: "0"(k00),
"1"(k01)
: "cc", "memory"
);
#endif // __aarch64__
#endif // __ARM_NEON
// tile
#if __ARM_NEON
int nn = tiles >> 2;
int remain = tiles & 3;
#else
int remain = tiles;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
for (; nn>0; nn--)
{
float32x4_t _output0_tm = vld1q_f32(output0_tm);
float32x4_t _output0_tmn = vld1q_f32(output0_tm+4);
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _r0n = vld1q_f32(r0+4);
float32x4_t _r1 = vld1q_f32(r1);
float32x4_t _r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k01);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
_r1 = vld1q_f32(r1);
_r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k01);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
_r1 = vld1q_f32(r1);
_r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k01);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
_r1 = vld1q_f32(r1);
_r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k01);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
}
#else
if (nn > 0)
{
asm volatile(
"0: \n"
"pld [%2, #256] \n"
"vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q8 \n"
"vmla.f32 q9, q13, %q9 \n"
"pld [%3, #256] \n"
"vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1
"vmla.f32 q8, q14, %q10 \n"
"vmla.f32 q9, q15, %q11 \n"
"pld [%2, #256] \n"
"vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
"vst1.f32 {d16-d19}, [%1 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q8 \n"
"vmla.f32 q9, q13, %q9 \n"
"pld [%3, #256] \n"
"vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1
"vmla.f32 q8, q14, %q10 \n"
"vmla.f32 q9, q15, %q11 \n"
"pld [%2, #256] \n"
"vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
"vst1.f32 {d16-d19}, [%1 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q8 \n"
"vmla.f32 q9, q13, %q9 \n"
"pld [%3, #256] \n"
"vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1
"vmla.f32 q8, q14, %q10 \n"
"vmla.f32 q9, q15, %q11 \n"
"pld [%2, #256] \n"
"vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
"vst1.f32 {d16-d19}, [%1 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q8 \n"
"vmla.f32 q9, q13, %q9 \n"
"pld [%3, #256] \n"
"vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1
"vmla.f32 q8, q14, %q10 \n"
"vmla.f32 q9, q15, %q11 \n"
"vst1.f32 {d16-d19}, [%1 :128]! \n"
"subs %0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(r1) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(r1),
"w"(_k00), // %8
"w"(_k00n), // %9
"w"(_k01), // %10
"w"(_k01n) // %11
: "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
#if __ARM_NEON
#if __aarch64__
float32x4_t _output0_tm = vld1q_f32(output0_tm);
float32x4_t _output0_tmn = vld1q_f32(output0_tm+4);
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _r0n = vld1q_f32(r0+4);
float32x4_t _r1 = vld1q_f32(r1);
float32x4_t _r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k01);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
#else
asm volatile(
"pld [%1, #256] \n"
"vld1.f32 {d24-d27}, [%1 :128]! \n"// q12 q13 = _r0
"pld [%0, #256] \n"
"vld1.f32 {d16-d19}, [%0 :128] \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q6 \n"
"vmla.f32 q9, q13, %q7 \n"
"pld [%2, #256] \n"
"vld1.f32 {d28-d31}, [%2 :128]! \n"// q14 q15 = _r1
"vmla.f32 q8, q14, %q8 \n"
"vmla.f32 q9, q15, %q9 \n"
"vst1.f32 {d16-d19}, [%0 :128]! \n"
: "=r"(output0_tm), // %0
"=r"(r0), // %1
"=r"(r1) // %2
: "0"(output0_tm),
"1"(r0),
"2"(r1),
"w"(_k00), // %6
"w"(_k00n), // %7
"w"(_k01), // %8
"w"(_k01n) // %9
: "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
#else
for (int m=0; m<8; m++)
{
output0_tm[m] += r0[m] * k00[m];
output0_tm[m] += r1[m] * k01[m];
}
r0 += 8;
r1 += 8;
output0_tm += 8;
#endif // __ARM_NEON
}
#if __ARM_NEON
#if __aarch64__
k00 += 8;
k01 += 8;
#endif // __aarch64__
#else
k00 += 8;
k01 += 8;
#endif // __ARM_NEON
}
}
for (; q<inch; q++)
{
const float* r0 = bottom_blob_tm.channel(q);
const float* k00 = kernel0_tm.row(q);
float* output0_tm = out0_tm;
for (int r=0; r<8; r++)
{
#if __ARM_NEON
#if __aarch64__
float32x4_t _k00 = vld1q_f32(k00);
float32x4_t _k00n = vld1q_f32(k00+4);
#else
float32x4_t _k00;
float32x4_t _k00n;
asm volatile(
"pld [%0, #256] \n"
"vld1.f32 {%e1-%f1}, [%0 :128]! \n"
"vld1.f32 {%e2-%f2}, [%0 :128]! \n"
: "=r"(k00), // %0
"=w"(_k00), // %1
"=w"(_k00n) // %2
: "0"(k00)
: "cc", "memory"
);
#endif // __aarch64__
#endif // __ARM_NEON
// tile
for (int i=0; i<tiles; i++)
{
#if __ARM_NEON
#if __aarch64__
float32x4_t _output0_tm = vld1q_f32(output0_tm);
float32x4_t _output0_tmn = vld1q_f32(output0_tm+4);
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _r0n = vld1q_f32(r0+4);
r0 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
#else
asm volatile(
"pld [%1, #256] \n"
"vld1.f32 {d24-d27}, [%1 :128]! \n"// q12 q13 = _r0
"pld [%0, #256] \n"
"vld1.f32 {d16-d19}, [%0 :128] \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q4 \n"
"vmla.f32 q9, q13, %q5 \n"
"vst1.f32 {d16-d19}, [%0 :128]! \n"
: "=r"(output0_tm), // %0
"=r"(r0) // %1
: "0"(output0_tm),
"1"(r0),
"w"(_k00), // %4
"w"(_k00n) // %5
: "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
#else
for (int m=0; m<8; m++)
{
output0_tm[m] += r0[m] * k00[m];
}
r0 += 8;
output0_tm += 8;
#endif // __ARM_NEON
}
#if __ARM_NEON
#if __aarch64__
k00 += 8;
#endif // __aarch64__
#else
k00 += 8;
#endif // __ARM_NEON
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
top_blob_bordered.create(outw, outh, outch);
{
// const float otm[6][8] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f}
// };
// 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32
// 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
// 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
// 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
// 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
// 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = w_tm/8 * h_tm/8;
#pragma omp parallel for
for (int p = 0; p<outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob_bordered.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
float tmp[6][8];
// tile
for (int i=0; i<outh/6; i++)
{
for (int j=0; j<outw/6; j++)
{
const float* output0_tm0 = out0_tm.row(i * w_tm/8 + j);
const float* output0_tm1 = out0_tm.row(i * w_tm/8 + j + tiles);
const float* output0_tm2 = out0_tm.row(i * w_tm/8 + j + tiles * 2);
const float* output0_tm3 = out0_tm.row(i * w_tm/8 + j + tiles * 3);
const float* output0_tm4 = out0_tm.row(i * w_tm/8 + j + tiles * 4);
const float* output0_tm5 = out0_tm.row(i * w_tm/8 + j + tiles * 5);
const float* output0_tm6 = out0_tm.row(i * w_tm/8 + j + tiles * 6);
const float* output0_tm7 = out0_tm.row(i * w_tm/8 + j + tiles * 7);
float* output0 = out0.row(i * 6) + j * 6;
const float* output0_tms[8] = { output0_tm0, output0_tm1, output0_tm2, output0_tm3, output0_tm4, output0_tm5, output0_tm6, output0_tm7 };
for (int m=0; m<8; m++)
{
const float* output0_tm = output0_tms[m];
float tmp024a = output0_tm[1] + output0_tm[2];
float tmp135a = output0_tm[1] - output0_tm[2];
float tmp024b = output0_tm[3] + output0_tm[4];
float tmp135b = output0_tm[3] - output0_tm[4];
float tmp024c = output0_tm[5] + output0_tm[6];
float tmp135c = output0_tm[5] - output0_tm[6];
tmp[0][m] = output0_tm[0] + tmp024a + tmp024b + tmp024c * 32;
tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8;
tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c;
tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16;
tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4;
tmp[5][m] = output0_tm[7] + tmp135a + tmp135b * 32 + tmp135c;
}
for (int m=0; m<6; m++)
{
const float* tmp0 = tmp[m];
float tmp024a = tmp0[1] + tmp0[2];
float tmp135a = tmp0[1] - tmp0[2];
float tmp024b = tmp0[3] + tmp0[4];
float tmp135b = tmp0[3] - tmp0[4];
float tmp024c = tmp0[5] + tmp0[6];
float tmp135c = tmp0[5] - tmp0[6];
output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32;
output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8;
output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c;
output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16;
output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4;
output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c;
output0 += outw;
}
}
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w);
}
static void conv3x3s1_winograd64_neon4(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 6n+2
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 5) / 6 * 6;
outh = (outh + 5) / 6 * 6;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f);
const float* bias = _bias;
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
bottom_blob_tm.create(4, 16 * w_tm/8 * h_tm/8, inch);
const int tiles = w_tm/8 * h_tm/8;
// const float itm[8][8] = {
// {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f},
//
// {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f},
// {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f},
//
// {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f},
// {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f},
//
// {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f},
// {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f},
//
// {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f}
// };
// 0 = r00 - r06 + (r04 - r02) * 5.25
// 7 = r07 - r01 + (r03 - r05) * 5.25
// 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
// 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)
// 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// reuse r04 * 1.25
// reuse r03 * 2.5
// 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
// 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)
#pragma omp parallel for
for (int q = 0; q<inch; q++)
{
const Mat img0 = bottom_blob_bordered.channel(q);
Mat img0_tm = bottom_blob_tm.channel(q);
float tmp[8][8];
// tile
for (int i=0; i<h_tm/8; i++)
{
for (int j=0; j<w_tm/8; j++)
{
const float* r0 = img0.row(i * 6) + j * 6;
float* r0_tm0_0 = img0_tm.row(i * w_tm/8 + j);
float* r0_tm0_4 = img0_tm.row(i * w_tm/8 + j + tiles);
float* r0_tm1_0 = img0_tm.row(i * w_tm/8 + j + tiles * 2);
float* r0_tm1_4 = img0_tm.row(i * w_tm/8 + j + tiles * 3);
float* r0_tm2_0 = img0_tm.row(i * w_tm/8 + j + tiles * 4);
float* r0_tm2_4 = img0_tm.row(i * w_tm/8 + j + tiles * 5);
float* r0_tm3_0 = img0_tm.row(i * w_tm/8 + j + tiles * 6);
float* r0_tm3_4 = img0_tm.row(i * w_tm/8 + j + tiles * 7);
float* r0_tm4_0 = img0_tm.row(i * w_tm/8 + j + tiles * 8);
float* r0_tm4_4 = img0_tm.row(i * w_tm/8 + j + tiles * 9);
float* r0_tm5_0 = img0_tm.row(i * w_tm/8 + j + tiles * 10);
float* r0_tm5_4 = img0_tm.row(i * w_tm/8 + j + tiles * 11);
float* r0_tm6_0 = img0_tm.row(i * w_tm/8 + j + tiles * 12);
float* r0_tm6_4 = img0_tm.row(i * w_tm/8 + j + tiles * 13);
float* r0_tm7_0 = img0_tm.row(i * w_tm/8 + j + tiles * 14);
float* r0_tm7_4 = img0_tm.row(i * w_tm/8 + j + tiles * 15);
for (int m=0; m<8; m++)
{
tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25;
tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25;
float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25);
float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25);
tmp[1][m] = tmp12a + tmp12b;
tmp[2][m] = tmp12a - tmp12b;
float tmp34a = (r0[6] + r0[2] * 0.25 - r0[4] * 1.25);
float tmp34b = (r0[1] * 0.5 - r0[3] * 2.5 + r0[5] * 2);
tmp[3][m] = tmp34a + tmp34b;
tmp[4][m] = tmp34a - tmp34b;
float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25) * 4);
float tmp56b = (r0[1] * 2 - r0[3] * 2.5 + r0[5] * 0.5);
tmp[5][m] = tmp56a + tmp56b;
tmp[6][m] = tmp56a - tmp56b;
r0 += w;
}
float* r0_tms_0[8] = { r0_tm0_0, r0_tm1_0, r0_tm2_0, r0_tm3_0, r0_tm4_0, r0_tm5_0, r0_tm6_0, r0_tm7_0 };
float* r0_tms_4[8] = { r0_tm0_4, r0_tm1_4, r0_tm2_4, r0_tm3_4, r0_tm4_4, r0_tm5_4, r0_tm6_4, r0_tm7_4 };
for (int m=0; m<8; m++)
{
const float* tmp0 = tmp[m];
float* r0_tm_0 = r0_tms_0[m];
float* r0_tm_4 = r0_tms_4[m];
r0_tm_0[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25;
r0_tm_4[3] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25;
float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25);
float tmp12b = (tmp0[1] - tmp0[3] * 4.25 + tmp0[5]);
r0_tm_0[1] = tmp12a + tmp12b;
r0_tm_0[2] = tmp12a - tmp12b;
float tmp34a = (tmp0[6] + tmp0[2] * 0.25 - tmp0[4] * 1.25);
float tmp34b = (tmp0[1] * 0.5 - tmp0[3] * 2.5 + tmp0[5] * 2);
r0_tm_0[3] = tmp34a + tmp34b;
r0_tm_4[0] = tmp34a - tmp34b;
float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25) * 4);
float tmp56b = (tmp0[1] * 2 - tmp0[3] * 2.5 + tmp0[5] * 0.5);
r0_tm_4[1] = tmp56a + tmp56b;
r0_tm_4[2] = tmp56a - tmp56b;
}
}
}
}
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
top_blob_tm.create(4, 16 * w_tm/8 * h_tm/8, outch);
const int tiles = h_tm/8 * w_tm/8;
int nn_outch = outch >> 2;
int remain_outch_start = nn_outch << 2;
#pragma omp parallel for
for (int pp=0; pp<nn_outch; pp++)
{
int p = pp * 4;
Mat out0_tm = top_blob_tm.channel(p);
Mat out1_tm = top_blob_tm.channel(p+1);
Mat out2_tm = top_blob_tm.channel(p+2);
Mat out3_tm = top_blob_tm.channel(p+3);
const Mat kernel0_tm = kernel_tm.channel(p);
const Mat kernel1_tm = kernel_tm.channel(p+1);
const Mat kernel2_tm = kernel_tm.channel(p+2);
const Mat kernel3_tm = kernel_tm.channel(p+3);
out0_tm.fill(0.f);
out1_tm.fill(0.f);
out2_tm.fill(0.f);
out3_tm.fill(0.f);
int q = 0;
#if __ARM_NEON && __aarch64__
for (; q+3<inch; q+=4)
{
const float* r0 = bottom_blob_tm.channel(q);
const float* r1 = bottom_blob_tm.channel(q+1);
const float* r2 = bottom_blob_tm.channel(q+2);
const float* r3 = bottom_blob_tm.channel(q+3);
const float* k00 = kernel0_tm.row(q);
const float* k01 = kernel0_tm.row(q+1);
const float* k02 = kernel0_tm.row(q+2);
const float* k03 = kernel0_tm.row(q+3);
const float* k10 = kernel1_tm.row(q);
const float* k11 = kernel1_tm.row(q+1);
const float* k12 = kernel1_tm.row(q+2);
const float* k13 = kernel1_tm.row(q+3);
const float* k20 = kernel2_tm.row(q);
const float* k21 = kernel2_tm.row(q+1);
const float* k22 = kernel2_tm.row(q+2);
const float* k23 = kernel2_tm.row(q+3);
const float* k30 = kernel3_tm.row(q);
const float* k31 = kernel3_tm.row(q+1);
const float* k32 = kernel3_tm.row(q+2);
const float* k33 = kernel3_tm.row(q+3);
float* output0_tm = out0_tm;
float* output1_tm = out1_tm;
float* output2_tm = out2_tm;
float* output3_tm = out3_tm;
for (int r=0; r<16; r++)
{
float32x4_t _k00 = vld1q_f32(k00);
float32x4_t _k01 = vld1q_f32(k01);
float32x4_t _k02 = vld1q_f32(k02);
float32x4_t _k03 = vld1q_f32(k03);
float32x4_t _k10 = vld1q_f32(k10);
float32x4_t _k11 = vld1q_f32(k11);
float32x4_t _k12 = vld1q_f32(k12);
float32x4_t _k13 = vld1q_f32(k13);
float32x4_t _k20 = vld1q_f32(k20);
float32x4_t _k21 = vld1q_f32(k21);
float32x4_t _k22 = vld1q_f32(k22);
float32x4_t _k23 = vld1q_f32(k23);
float32x4_t _k30 = vld1q_f32(k30);
float32x4_t _k31 = vld1q_f32(k31);
float32x4_t _k32 = vld1q_f32(k32);
float32x4_t _k33 = vld1q_f32(k33);
// tile
int nn = tiles >> 2;
int remain = tiles & 3;
#ifdef __clang__
// gcc rejects inline asm with more than 30 operands :(
if (nn > 0)
{
asm volatile(
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v16.4s}, [%5], #16 \n"
"0: \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v20.4s}, [%1] \n"
"add x4, %1, #16 \n"
"fmla v20.4s, v16.4s, %18.4s \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v21.4s}, [%2] \n"
"add x5, %2, #16 \n"
"fmla v21.4s, v16.4s, %22.4s \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v22.4s}, [%3] \n"
"add x6, %3, #16 \n"
"fmla v22.4s, v16.4s, %26.4s \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v23.4s}, [%4] \n"
"add x7, %4, #16 \n"
"prfm pldl1keep, [%6, #128] \n"
"ld1 {v17.4s}, [%6], #16 \n"
"fmla v23.4s, v16.4s, %30.4s \n"
"prfm pldl1keep, [x4, #128] \n"
"ld1 {v24.4s}, [x4] \n"
"fmla v20.4s, v17.4s, %19.4s \n"
"fmla v21.4s, v17.4s, %23.4s \n"
"prfm pldl1keep, [%7, #128] \n"
"ld1 {v18.4s}, [%7], #16 \n"
"fmla v22.4s, v17.4s, %27.4s \n"
"fmla v23.4s, v17.4s, %31.4s \n"
"prfm pldl1keep, [x5, #128] \n"
"ld1 {v25.4s}, [x5] \n"
"fmla v20.4s, v18.4s, %20.4s \n"
"fmla v21.4s, v18.4s, %24.4s \n"
"prfm pldl1keep, [%8, #128] \n"
"ld1 {v19.4s}, [%8], #16 \n"
"fmla v22.4s, v18.4s, %28.4s \n"
"fmla v23.4s, v18.4s, %32.4s \n"
"prfm pldl1keep, [x6, #128] \n"
"ld1 {v26.4s}, [x6] \n"
"fmla v20.4s, v19.4s, %21.4s \n"
"fmla v21.4s, v19.4s, %25.4s \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v16.4s}, [%5], #16 \n"
"fmla v22.4s, v19.4s, %29.4s \n"
"fmla v23.4s, v19.4s, %33.4s \n"
///////
"prfm pldl1keep, [x7, #128] \n"
"ld1 {v27.4s}, [x7] \n"
"st1 {v20.4s}, [%1] \n"
"add %1, %1, #32 \n"
"fmla v24.4s, v16.4s, %18.4s \n"
"fmla v25.4s, v16.4s, %22.4s \n"
"prfm pldl1keep, [%6, #128] \n"
"ld1 {v17.4s}, [%6], #16 \n"
"fmla v26.4s, v16.4s, %26.4s \n"
"fmla v27.4s, v16.4s, %30.4s \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v20.4s}, [%1] \n"
"st1 {v21.4s}, [%2] \n"
"add %2, %2, #32 \n"
"fmla v24.4s, v17.4s, %19.4s \n"
"fmla v25.4s, v17.4s, %23.4s \n"
"prfm pldl1keep, [%7, #128] \n"
"ld1 {v18.4s}, [%7], #16 \n"
"fmla v26.4s, v17.4s, %27.4s \n"
"fmla v27.4s, v17.4s, %31.4s \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v21.4s}, [%2] \n"
"st1 {v22.4s}, [%3] \n"
"add %3, %3, #32 \n"
"fmla v24.4s, v18.4s, %20.4s \n"
"fmla v25.4s, v18.4s, %24.4s \n"
"prfm pldl1keep, [%8, #128] \n"
"ld1 {v19.4s}, [%8], #16 \n"
"fmla v26.4s, v18.4s, %28.4s \n"
"fmla v27.4s, v18.4s, %32.4s \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v22.4s}, [%3] \n"
"st1 {v23.4s}, [%4] \n"
"add %4, %4, #32 \n"
"fmla v24.4s, v19.4s, %21.4s \n"
"fmla v25.4s, v19.4s, %25.4s \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v16.4s}, [%5], #16 \n"
"fmla v26.4s, v19.4s, %29.4s \n"
"fmla v27.4s, v19.4s, %33.4s \n"
///////
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v23.4s}, [%4] \n"
"st1 {v24.4s}, [x4] \n"
"add x4, x4, #32 \n"
"fmla v20.4s, v16.4s, %18.4s \n"
"fmla v21.4s, v16.4s, %22.4s \n"
"prfm pldl1keep, [%6, #128] \n"
"ld1 {v17.4s}, [%6], #16 \n"
"fmla v22.4s, v16.4s, %26.4s \n"
"fmla v23.4s, v16.4s, %30.4s \n"
"prfm pldl1keep, [x4, #128] \n"
"ld1 {v24.4s}, [x4] \n"
"st1 {v25.4s}, [x5] \n"
"add x5, x5, #32 \n"
"fmla v20.4s, v17.4s, %19.4s \n"
"fmla v21.4s, v17.4s, %23.4s \n"
"prfm pldl1keep, [%7, #128] \n"
"ld1 {v18.4s}, [%7], #16 \n"
"fmla v22.4s, v17.4s, %27.4s \n"
"fmla v23.4s, v17.4s, %31.4s \n"
"prfm pldl1keep, [x5, #128] \n"
"ld1 {v25.4s}, [x5] \n"
"st1 {v26.4s}, [x6] \n"
"add x6, x6, #32 \n"
"fmla v20.4s, v18.4s, %20.4s \n"
"fmla v21.4s, v18.4s, %24.4s \n"
"prfm pldl1keep, [%8, #128] \n"
"ld1 {v19.4s}, [%8], #16 \n"
"fmla v22.4s, v18.4s, %28.4s \n"
"fmla v23.4s, v18.4s, %32.4s \n"
"prfm pldl1keep, [x6, #128] \n"
"ld1 {v26.4s}, [x6] \n"
"st1 {v27.4s}, [x7] \n"
"add x7, x7, #32 \n"
"fmla v20.4s, v19.4s, %21.4s \n"
"fmla v21.4s, v19.4s, %25.4s \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v16.4s}, [%5], #16 \n"
"fmla v22.4s, v19.4s, %29.4s \n"
"fmla v23.4s, v19.4s, %33.4s \n"
///////
"prfm pldl1keep, [x7, #128] \n"
"ld1 {v27.4s}, [x7] \n"
"st1 {v20.4s}, [%1] \n"
"fmla v24.4s, v16.4s, %18.4s \n"
"fmla v25.4s, v16.4s, %22.4s \n"
"prfm pldl1keep, [%6, #128] \n"
"ld1 {v17.4s}, [%6], #16 \n"
"fmla v26.4s, v16.4s, %26.4s \n"
"fmla v27.4s, v16.4s, %30.4s \n"
"st1 {v21.4s}, [%2] \n"
"fmla v24.4s, v17.4s, %19.4s \n"
"fmla v25.4s, v17.4s, %23.4s \n"
"prfm pldl1keep, [%7, #128] \n"
"ld1 {v18.4s}, [%7], #16 \n"
"fmla v26.4s, v17.4s, %27.4s \n"
"fmla v27.4s, v17.4s, %31.4s \n"
"st1 {v22.4s}, [%3] \n"
"fmla v24.4s, v18.4s, %20.4s \n"
"fmla v25.4s, v18.4s, %24.4s \n"
"prfm pldl1keep, [%8, #128] \n"
"ld1 {v19.4s}, [%8], #16 \n"
"fmla v26.4s, v18.4s, %28.4s \n"
"fmla v27.4s, v18.4s, %32.4s \n"
"st1 {v23.4s}, [%4] \n"
"fmla v24.4s, v19.4s, %21.4s \n"
"fmla v25.4s, v19.4s, %25.4s \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v16.4s}, [%5], #16 \n"
"fmla v26.4s, v19.4s, %29.4s \n"
"fmla v27.4s, v19.4s, %33.4s \n"
"st1 {v24.4s}, [x4], #16 \n"
"mov %1, x4 \n"
"st1 {v25.4s}, [x5], #16 \n"
"mov %2, x5 \n"
"subs %w0, %w0, #1 \n"
"st1 {v26.4s}, [x6], #16 \n"
"mov %3, x6 \n"
"st1 {v27.4s}, [x7], #16 \n"
"mov %4, x7 \n"
"bne 0b \n"
"sub %5, %5, #16 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(output2_tm), // %3
"=r"(output3_tm), // %4
"=r"(r0), // %5
"=r"(r1), // %6
"=r"(r2), // %7
"=r"(r3) // %8
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(output2_tm),
"4"(output3_tm),
"5"(r0),
"6"(r1),
"7"(r2),
"8"(r3),
"w"(_k00), // %18
"w"(_k01), // %19
"w"(_k02), // %20
"w"(_k03), // %21
"w"(_k10), // %22
"w"(_k11), // %23
"w"(_k12), // %24
"w"(_k13), // %25
"w"(_k20), // %26
"w"(_k21), // %27
"w"(_k22), // %28
"w"(_k23), // %29
"w"(_k30), // %30
"w"(_k31), // %31
"w"(_k32), // %32
"w"(_k33) // %33
: "cc", "memory", "x4", "x5", "x6", "x7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"
);
}
#else
for (; nn>0; nn--)
{
float32x4_t _output0_tm = vld1q_f32(output0_tm);
float32x4_t _output1_tm = vld1q_f32(output1_tm);
float32x4_t _output2_tm = vld1q_f32(output2_tm);
float32x4_t _output3_tm = vld1q_f32(output3_tm);
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _r1 = vld1q_f32(r1);
float32x4_t _r2 = vld1q_f32(r2);
float32x4_t _r3 = vld1q_f32(r3);
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k01);
_output0_tm = vmlaq_f32(_output0_tm, _r2, _k02);
_output0_tm = vmlaq_f32(_output0_tm, _r3, _k03);
_output1_tm = vmlaq_f32(_output1_tm, _r0, _k10);
_output1_tm = vmlaq_f32(_output1_tm, _r1, _k11);
_output1_tm = vmlaq_f32(_output1_tm, _r2, _k12);
_output1_tm = vmlaq_f32(_output1_tm, _r3, _k13);
_output2_tm = vmlaq_f32(_output2_tm, _r0, _k20);
_output2_tm = vmlaq_f32(_output2_tm, _r1, _k21);
_output2_tm = vmlaq_f32(_output2_tm, _r2, _k22);
_output2_tm = vmlaq_f32(_output2_tm, _r3, _k23);
_output3_tm = vmlaq_f32(_output3_tm, _r0, _k30);
_output3_tm = vmlaq_f32(_output3_tm, _r1, _k31);
_output3_tm = vmlaq_f32(_output3_tm, _r2, _k32);
_output3_tm = vmlaq_f32(_output3_tm, _r3, _k33);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output1_tm, _output1_tm);
vst1q_f32(output2_tm, _output2_tm);
vst1q_f32(output3_tm, _output3_tm);
output0_tm += 4;
output1_tm += 4;
output2_tm += 4;
output3_tm += 4;
_output0_tm = vld1q_f32(output0_tm);
_output1_tm = vld1q_f32(output1_tm);
_output2_tm = vld1q_f32(output2_tm);
_output3_tm = vld1q_f32(output3_tm);
_r0 = vld1q_f32(r0);
_r1 = vld1q_f32(r1);
_r2 = vld1q_f32(r2);
_r3 = vld1q_f32(r3);
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k01);
_output0_tm = vmlaq_f32(_output0_tm, _r2, _k02);
_output0_tm = vmlaq_f32(_output0_tm, _r3, _k03);
_output1_tm = vmlaq_f32(_output1_tm, _r0, _k10);
_output1_tm = vmlaq_f32(_output1_tm, _r1, _k11);
_output1_tm = vmlaq_f32(_output1_tm, _r2, _k12);
_output1_tm = vmlaq_f32(_output1_tm, _r3, _k13);
_output2_tm = vmlaq_f32(_output2_tm, _r0, _k20);
_output2_tm = vmlaq_f32(_output2_tm, _r1, _k21);
_output2_tm = vmlaq_f32(_output2_tm, _r2, _k22);
_output2_tm = vmlaq_f32(_output2_tm, _r3, _k23);
_output3_tm = vmlaq_f32(_output3_tm, _r0, _k30);
_output3_tm = vmlaq_f32(_output3_tm, _r1, _k31);
_output3_tm = vmlaq_f32(_output3_tm, _r2, _k32);
_output3_tm = vmlaq_f32(_output3_tm, _r3, _k33);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output1_tm, _output1_tm);
vst1q_f32(output2_tm, _output2_tm);
vst1q_f32(output3_tm, _output3_tm);
output0_tm += 4;
output1_tm += 4;
output2_tm += 4;
output3_tm += 4;
_output0_tm = vld1q_f32(output0_tm);
_output1_tm = vld1q_f32(output1_tm);
_output2_tm = vld1q_f32(output2_tm);
_output3_tm = vld1q_f32(output3_tm);
_r0 = vld1q_f32(r0);
_r1 = vld1q_f32(r1);
_r2 = vld1q_f32(r2);
_r3 = vld1q_f32(r3);
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k01);
_output0_tm = vmlaq_f32(_output0_tm, _r2, _k02);
_output0_tm = vmlaq_f32(_output0_tm, _r3, _k03);
_output1_tm = vmlaq_f32(_output1_tm, _r0, _k10);
_output1_tm = vmlaq_f32(_output1_tm, _r1, _k11);
_output1_tm = vmlaq_f32(_output1_tm, _r2, _k12);
_output1_tm = vmlaq_f32(_output1_tm, _r3, _k13);
_output2_tm = vmlaq_f32(_output2_tm, _r0, _k20);
_output2_tm = vmlaq_f32(_output2_tm, _r1, _k21);
_output2_tm = vmlaq_f32(_output2_tm, _r2, _k22);
_output2_tm = vmlaq_f32(_output2_tm, _r3, _k23);
_output3_tm = vmlaq_f32(_output3_tm, _r0, _k30);
_output3_tm = vmlaq_f32(_output3_tm, _r1, _k31);
_output3_tm = vmlaq_f32(_output3_tm, _r2, _k32);
_output3_tm = vmlaq_f32(_output3_tm, _r3, _k33);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output1_tm, _output1_tm);
vst1q_f32(output2_tm, _output2_tm);
vst1q_f32(output3_tm, _output3_tm);
output0_tm += 4;
output1_tm += 4;
output2_tm += 4;
output3_tm += 4;
_output0_tm = vld1q_f32(output0_tm);
_output1_tm = vld1q_f32(output1_tm);
_output2_tm = vld1q_f32(output2_tm);
_output3_tm = vld1q_f32(output3_tm);
_r0 = vld1q_f32(r0);
_r1 = vld1q_f32(r1);
_r2 = vld1q_f32(r2);
_r3 = vld1q_f32(r3);
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k01);
_output0_tm = vmlaq_f32(_output0_tm, _r2, _k02);
_output0_tm = vmlaq_f32(_output0_tm, _r3, _k03);
_output1_tm = vmlaq_f32(_output1_tm, _r0, _k10);
_output1_tm = vmlaq_f32(_output1_tm, _r1, _k11);
_output1_tm = vmlaq_f32(_output1_tm, _r2, _k12);
_output1_tm = vmlaq_f32(_output1_tm, _r3, _k13);
_output2_tm = vmlaq_f32(_output2_tm, _r0, _k20);
_output2_tm = vmlaq_f32(_output2_tm, _r1, _k21);
_output2_tm = vmlaq_f32(_output2_tm, _r2, _k22);
_output2_tm = vmlaq_f32(_output2_tm, _r3, _k23);
_output3_tm = vmlaq_f32(_output3_tm, _r0, _k30);
_output3_tm = vmlaq_f32(_output3_tm, _r1, _k31);
_output3_tm = vmlaq_f32(_output3_tm, _r2, _k32);
_output3_tm = vmlaq_f32(_output3_tm, _r3, _k33);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output1_tm, _output1_tm);
vst1q_f32(output2_tm, _output2_tm);
vst1q_f32(output3_tm, _output3_tm);
output0_tm += 4;
output1_tm += 4;
output2_tm += 4;
output3_tm += 4;
}
#endif
// Handle the 1..3 tiles left over after the 4-tile-unrolled main loop above.
// Each iteration consumes one 4-float tile from each of the four input-channel
// streams r0..r3 and accumulates into the four output-channel streams
// output0_tm..output3_tm using the 16 kernel vectors _k00.._k33.
for (; remain>0; remain--)
{
#ifdef __clang__
// gcc rejects over 30 operands :(
// (so the hand-scheduled asm version is clang-only; gcc falls back to
// the intrinsics below). nn is listed as operand %0 even though this asm
// never references it — that keeps the %18..%33 kernel operand numbering
// identical to the unrolled loop above.
asm volatile(
// load r0 tile, accumulate it into all four outputs (k*0 column)
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v16.4s}, [%5], #16 \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v20.4s}, [%1] \n"
"fmla v20.4s, v16.4s, %18.4s \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v21.4s}, [%2] \n"
"fmla v21.4s, v16.4s, %22.4s \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v22.4s}, [%3] \n"
"fmla v22.4s, v16.4s, %26.4s \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v23.4s}, [%4] \n"
"fmla v23.4s, v16.4s, %30.4s \n"
// r1 tile (k*1 column)
"prfm pldl1keep, [%6, #128] \n"
"ld1 {v17.4s}, [%6], #16 \n"
"fmla v20.4s, v17.4s, %19.4s \n"
"fmla v21.4s, v17.4s, %23.4s \n"
"fmla v22.4s, v17.4s, %27.4s \n"
"fmla v23.4s, v17.4s, %31.4s \n"
// r2 tile (k*2 column)
"prfm pldl1keep, [%7, #128] \n"
"ld1 {v18.4s}, [%7], #16 \n"
"fmla v20.4s, v18.4s, %20.4s \n"
"fmla v21.4s, v18.4s, %24.4s \n"
"fmla v22.4s, v18.4s, %28.4s \n"
"fmla v23.4s, v18.4s, %32.4s \n"
// r3 tile (k*3 column)
"prfm pldl1keep, [%8, #128] \n"
"ld1 {v19.4s}, [%8], #16 \n"
"fmla v20.4s, v19.4s, %21.4s \n"
"fmla v21.4s, v19.4s, %25.4s \n"
"fmla v22.4s, v19.4s, %29.4s \n"
"fmla v23.4s, v19.4s, %33.4s \n"
// store the four updated output tiles and advance the output pointers
"st1 {v20.4s}, [%1], #16 \n"
"st1 {v21.4s}, [%2], #16 \n"
"st1 {v22.4s}, [%3], #16 \n"
"st1 {v23.4s}, [%4], #16 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(output2_tm), // %3
"=r"(output3_tm), // %4
"=r"(r0), // %5
"=r"(r1), // %6
"=r"(r2), // %7
"=r"(r3) // %8
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(output2_tm),
"4"(output3_tm),
"5"(r0),
"6"(r1),
"7"(r2),
"8"(r3),
"w"(_k00), // %18
"w"(_k01), // %19
"w"(_k02), // %20
"w"(_k03), // %21
"w"(_k10), // %22
"w"(_k11), // %23
"w"(_k12), // %24
"w"(_k13), // %25
"w"(_k20), // %26
"w"(_k21), // %27
"w"(_k22), // %28
"w"(_k23), // %29
"w"(_k30), // %30
"w"(_k31), // %31
"w"(_k32), // %32
"w"(_k33) // %33
: "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"//, "v24", "v25", "v26", "v27"
);
#else
// Intrinsics fallback (non-clang): identical math to the asm above —
// load one tile per input channel, multiply-accumulate into the four
// output tiles, store, and advance all pointers by 4 floats.
float32x4_t _output0_tm = vld1q_f32(output0_tm);
float32x4_t _output1_tm = vld1q_f32(output1_tm);
float32x4_t _output2_tm = vld1q_f32(output2_tm);
float32x4_t _output3_tm = vld1q_f32(output3_tm);
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _r1 = vld1q_f32(r1);
float32x4_t _r2 = vld1q_f32(r2);
float32x4_t _r3 = vld1q_f32(r3);
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k01);
_output0_tm = vmlaq_f32(_output0_tm, _r2, _k02);
_output0_tm = vmlaq_f32(_output0_tm, _r3, _k03);
_output1_tm = vmlaq_f32(_output1_tm, _r0, _k10);
_output1_tm = vmlaq_f32(_output1_tm, _r1, _k11);
_output1_tm = vmlaq_f32(_output1_tm, _r2, _k12);
_output1_tm = vmlaq_f32(_output1_tm, _r3, _k13);
_output2_tm = vmlaq_f32(_output2_tm, _r0, _k20);
_output2_tm = vmlaq_f32(_output2_tm, _r1, _k21);
_output2_tm = vmlaq_f32(_output2_tm, _r2, _k22);
_output2_tm = vmlaq_f32(_output2_tm, _r3, _k23);
_output3_tm = vmlaq_f32(_output3_tm, _r0, _k30);
_output3_tm = vmlaq_f32(_output3_tm, _r1, _k31);
_output3_tm = vmlaq_f32(_output3_tm, _r2, _k32);
_output3_tm = vmlaq_f32(_output3_tm, _r3, _k33);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output1_tm, _output1_tm);
vst1q_f32(output2_tm, _output2_tm);
vst1q_f32(output3_tm, _output3_tm);
output0_tm += 4;
output1_tm += 4;
output2_tm += 4;
output3_tm += 4;
#endif
}
k00 += 4;
k01 += 4;
k02 += 4;
k03 += 4;
k10 += 4;
k11 += 4;
k12 += 4;
k13 += 4;
k20 += 4;
k21 += 4;
k22 += 4;
k23 += 4;
k30 += 4;
k31 += 4;
k32 += 4;
k33 += 4;
}
}
#endif // __ARM_NEON && __aarch64__
for (; q+1<inch; q+=2)
{
const float* r0 = bottom_blob_tm.channel(q);
const float* r1 = bottom_blob_tm.channel(q+1);
const float* k00 = kernel0_tm.row(q);
const float* k01 = kernel0_tm.row(q+1);
const float* k10 = kernel1_tm.row(q);
const float* k11 = kernel1_tm.row(q+1);
const float* k20 = kernel2_tm.row(q);
const float* k21 = kernel2_tm.row(q+1);
const float* k30 = kernel3_tm.row(q);
const float* k31 = kernel3_tm.row(q+1);
float* output0_tm = out0_tm;
float* output1_tm = out1_tm;
float* output2_tm = out2_tm;
float* output3_tm = out3_tm;
for (int r=0; r<16; r++)
{
#if __ARM_NEON
#if __aarch64__
float32x4_t _k00 = vld1q_f32(k00);
float32x4_t _k01 = vld1q_f32(k01);
float32x4_t _k10 = vld1q_f32(k10);
float32x4_t _k11 = vld1q_f32(k11);
float32x4_t _k20 = vld1q_f32(k20);
float32x4_t _k21 = vld1q_f32(k21);
float32x4_t _k30 = vld1q_f32(k30);
float32x4_t _k31 = vld1q_f32(k31);
#else
float32x4_t _k00;
float32x4_t _k01;
float32x4_t _k10;
float32x4_t _k11;
float32x4_t _k20;
float32x4_t _k21;
float32x4_t _k30;
float32x4_t _k31;
asm volatile(
"pld [%0, #128] \n"
"vld1.f32 {%e8-%f8}, [%0 :128]! \n"
"pld [%1, #128] \n"
"vld1.f32 {%e9-%f9}, [%1 :128]! \n"
"pld [%2, #128] \n"
"vld1.f32 {%e10-%f10}, [%2 :128]! \n"
"pld [%3, #128] \n"
"vld1.f32 {%e11-%f11}, [%3 :128]! \n"
"pld [%4, #128] \n"
"vld1.f32 {%e12-%f12}, [%4 :128]! \n"
"pld [%5, #128] \n"
"vld1.f32 {%e13-%f13}, [%5 :128]! \n"
"pld [%6, #128] \n"
"vld1.f32 {%e14-%f14}, [%6 :128]! \n"
"pld [%7, #128] \n"
"vld1.f32 {%e15-%f15}, [%7 :128]! \n"
: "=r"(k00), // %0
"=r"(k01), // %1
"=r"(k10), // %2
"=r"(k11), // %3
"=r"(k20), // %4
"=r"(k21), // %5
"=r"(k30), // %6
"=r"(k31), // %7
"=w"(_k00), // %8
"=w"(_k01), // %9
"=w"(_k10), // %10
"=w"(_k11), // %11
"=w"(_k20), // %12
"=w"(_k21), // %13
"=w"(_k30), // %14
"=w"(_k31) // %15
: "0"(k00),
"1"(k01),
"2"(k10),
"3"(k11),
"4"(k20),
"5"(k21),
"6"(k30),
"7"(k31)
: "cc", "memory"
);
#endif
#endif // __ARM_NEON
// tile
#if __ARM_NEON
int nn = tiles >> 2;
int remain = tiles & 3;
#else
int remain = tiles;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v20.4s}, [%5], #16 \n"
"0: \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v16.4s}, [%1] \n"
"fmla v16.4s, v20.4s, %14.4s \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v17.4s}, [%2] \n"
"fmla v17.4s, v20.4s, %16.4s \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v18.4s}, [%3] \n"
"fmla v18.4s, v20.4s, %18.4s \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v19.4s}, [%4] \n"
"fmla v19.4s, v20.4s, %20.4s \n"
"prfm pldl1keep, [%6, #128] \n"
"ld1 {v21.4s}, [%6], #16 \n"
"fmla v16.4s, v21.4s, %15.4s \n"
"fmla v17.4s, v21.4s, %17.4s \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v20.4s}, [%5], #16 \n"
"fmla v18.4s, v21.4s, %19.4s \n"
"fmla v19.4s, v21.4s, %21.4s \n"
"st1 {v16.4s}, [%1], #16 \n"
"st1 {v17.4s}, [%2], #16 \n"
////
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v16.4s}, [%1] \n"
"fmla v16.4s, v20.4s, %14.4s \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v17.4s}, [%2] \n"
"fmla v17.4s, v20.4s, %16.4s \n"
"st1 {v18.4s}, [%3], #16 \n"
"st1 {v19.4s}, [%4], #16 \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v18.4s}, [%3] \n"
"fmla v18.4s, v20.4s, %18.4s \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v19.4s}, [%4] \n"
"fmla v19.4s, v20.4s, %20.4s \n"
"prfm pldl1keep, [%6, #128] \n"
"ld1 {v21.4s}, [%6], #16 \n"
"fmla v16.4s, v21.4s, %15.4s \n"
"fmla v17.4s, v21.4s, %17.4s \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v20.4s}, [%5], #16 \n"
"fmla v18.4s, v21.4s, %19.4s \n"
"fmla v19.4s, v21.4s, %21.4s \n"
"st1 {v16.4s}, [%1], #16 \n"
"st1 {v17.4s}, [%2], #16 \n"
////
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v16.4s}, [%1] \n"
"fmla v16.4s, v20.4s, %14.4s \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v17.4s}, [%2] \n"
"fmla v17.4s, v20.4s, %16.4s \n"
"st1 {v18.4s}, [%3], #16 \n"
"st1 {v19.4s}, [%4], #16 \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v18.4s}, [%3] \n"
"fmla v18.4s, v20.4s, %18.4s \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v19.4s}, [%4] \n"
"fmla v19.4s, v20.4s, %20.4s \n"
"prfm pldl1keep, [%6, #128] \n"
"ld1 {v21.4s}, [%6], #16 \n"
"fmla v16.4s, v21.4s, %15.4s \n"
"fmla v17.4s, v21.4s, %17.4s \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v20.4s}, [%5], #16 \n"
"fmla v18.4s, v21.4s, %19.4s \n"
"fmla v19.4s, v21.4s, %21.4s \n"
"st1 {v16.4s}, [%1], #16 \n"
"st1 {v17.4s}, [%2], #16 \n"
////
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v16.4s}, [%1] \n"
"fmla v16.4s, v20.4s, %14.4s \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v17.4s}, [%2] \n"
"fmla v17.4s, v20.4s, %16.4s \n"
"st1 {v18.4s}, [%3], #16 \n"
"st1 {v19.4s}, [%4], #16 \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v18.4s}, [%3] \n"
"fmla v18.4s, v20.4s, %18.4s \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v19.4s}, [%4] \n"
"fmla v19.4s, v20.4s, %20.4s \n"
"prfm pldl1keep, [%6, #128] \n"
"ld1 {v21.4s}, [%6], #16 \n"
"fmla v16.4s, v21.4s, %15.4s \n"
"fmla v17.4s, v21.4s, %17.4s \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v20.4s}, [%5], #16 \n"
"fmla v18.4s, v21.4s, %19.4s \n"
"fmla v19.4s, v21.4s, %21.4s \n"
"st1 {v16.4s}, [%1], #16 \n"
"st1 {v17.4s}, [%2], #16 \n"
"subs %w0, %w0, #1 \n"
"st1 {v18.4s}, [%3], #16 \n"
"st1 {v19.4s}, [%4], #16 \n"
"bne 0b \n"
"sub %5, %5, #16 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(output2_tm), // %3
"=r"(output3_tm), // %4
"=r"(r0), // %5
"=r"(r1) // %6
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(output2_tm),
"4"(output3_tm),
"5"(r0),
"6"(r1),
"w"(_k00), // %14
"w"(_k01), // %15
"w"(_k10), // %16
"w"(_k11), // %17
"w"(_k20), // %18
"w"(_k21), // %19
"w"(_k30), // %20
"w"(_k31) // %21
: "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21"
);
}
#else
if (nn > 0)
{
asm volatile(
"pld [%5, #128] \n"
"vld1.f32 {d24-d25}, [%5 :128]! \n"// q12 = _r0
"0: \n"
"pld [%1, #128] \n"
"vld1.f32 {d16-d17}, [%1 :128] \n"// q8 = _output0_tm
"vmla.f32 q8, q12, %q14 \n"
"pld [%2, #128] \n"
"vld1.f32 {d18-d19}, [%2 :128] \n"// q9 = _output1_tm
"vmla.f32 q9, q12, %q16 \n"
"pld [%3, #128] \n"
"vld1.f32 {d20-d21}, [%3 :128] \n"// q10 = _output2_tm
"vmla.f32 q10, q12, %q18 \n"
"pld [%4, #128] \n"
"vld1.f32 {d22-d23}, [%4 :128] \n"// q11 = _output3_tm
"vmla.f32 q11, q12, %q20 \n"
"pld [%6, #128] \n"
"vld1.f32 {d26-d27}, [%6 :128]! \n"// q13 = _r1
"vmla.f32 q8, q13, %q15 \n"
"vmla.f32 q9, q13, %q17 \n"
"pld [%5, #128] \n"
"vld1.f32 {d24-d25}, [%5 :128]! \n"// q12 = _r0
"vmla.f32 q10, q13, %q19 \n"
"vmla.f32 q11, q13, %q21 \n"
"vst1.f32 {d16-d17}, [%1 :128]! \n"
"vst1.f32 {d18-d19}, [%2 :128]! \n"
////
"pld [%1, #128] \n"
"vld1.f32 {d16-d17}, [%1 :128] \n"// q8 = _output0_tm
"vmla.f32 q8, q12, %q14 \n"
"pld [%2, #128] \n"
"vld1.f32 {d18-d19}, [%2 :128] \n"// q9 = _output1_tm
"vmla.f32 q9, q12, %q16 \n"
"vst1.f32 {d20-d21}, [%3 :128]! \n"
"vst1.f32 {d22-d23}, [%4 :128]! \n"
"pld [%3, #128] \n"
"vld1.f32 {d20-d21}, [%3 :128] \n"// q10 = _output2_tm
"vmla.f32 q10, q12, %q18 \n"
"pld [%4, #128] \n"
"vld1.f32 {d22-d23}, [%4 :128] \n"// q11 = _output3_tm
"vmla.f32 q11, q12, %q20 \n"
"pld [%6, #128] \n"
"vld1.f32 {d26-d27}, [%6 :128]! \n"// q13 = _r1
"vmla.f32 q8, q13, %q15 \n"
"vmla.f32 q9, q13, %q17 \n"
"pld [%5, #128] \n"
"vld1.f32 {d24-d25}, [%5 :128]! \n"// q12 = _r0
"vmla.f32 q10, q13, %q19 \n"
"vmla.f32 q11, q13, %q21 \n"
"vst1.f32 {d16-d17}, [%1 :128]! \n"
"vst1.f32 {d18-d19}, [%2 :128]! \n"
////
"pld [%1, #128] \n"
"vld1.f32 {d16-d17}, [%1 :128] \n"// q8 = _output0_tm
"vmla.f32 q8, q12, %q14 \n"
"pld [%2, #128] \n"
"vld1.f32 {d18-d19}, [%2 :128] \n"// q9 = _output1_tm
"vmla.f32 q9, q12, %q16 \n"
"vst1.f32 {d20-d21}, [%3 :128]! \n"
"vst1.f32 {d22-d23}, [%4 :128]! \n"
"pld [%3, #128] \n"
"vld1.f32 {d20-d21}, [%3 :128] \n"// q10 = _output2_tm
"vmla.f32 q10, q12, %q18 \n"
"pld [%4, #128] \n"
"vld1.f32 {d22-d23}, [%4 :128] \n"// q11 = _output3_tm
"vmla.f32 q11, q12, %q20 \n"
"pld [%6, #128] \n"
"vld1.f32 {d26-d27}, [%6 :128]! \n"// q13 = _r1
"vmla.f32 q8, q13, %q15 \n"
"vmla.f32 q9, q13, %q17 \n"
"pld [%5, #128] \n"
"vld1.f32 {d24-d25}, [%5 :128]! \n"// q12 = _r0
"vmla.f32 q10, q13, %q19 \n"
"vmla.f32 q11, q13, %q21 \n"
"vst1.f32 {d16-d17}, [%1 :128]! \n"
"vst1.f32 {d18-d19}, [%2 :128]! \n"
////
"pld [%1, #128] \n"
"vld1.f32 {d16-d17}, [%1 :128] \n"// q8 = _output0_tm
"vmla.f32 q8, q12, %q14 \n"
"pld [%2, #128] \n"
"vld1.f32 {d18-d19}, [%2 :128] \n"// q9 = _output1_tm
"vmla.f32 q9, q12, %q16 \n"
"vst1.f32 {d20-d21}, [%3 :128]! \n"
"vst1.f32 {d22-d23}, [%4 :128]! \n"
"pld [%3, #128] \n"
"vld1.f32 {d20-d21}, [%3 :128] \n"// q10 = _output2_tm
"vmla.f32 q10, q12, %q18 \n"
"pld [%4, #128] \n"
"vld1.f32 {d22-d23}, [%4 :128] \n"// q11 = _output3_tm
"vmla.f32 q11, q12, %q20 \n"
"pld [%6, #128] \n"
"vld1.f32 {d26-d27}, [%6 :128]! \n"// q13 = _r1
"vmla.f32 q8, q13, %q15 \n"
"vmla.f32 q9, q13, %q17 \n"
"pld [%5, #128] \n"
"vld1.f32 {d24-d25}, [%5 :128]! \n"// q12 = _r0
"vmla.f32 q10, q13, %q19 \n"
"vmla.f32 q11, q13, %q21 \n"
"vst1.f32 {d16-d17}, [%1 :128]! \n"
"vst1.f32 {d18-d19}, [%2 :128]! \n"
"subs %0, #1 \n"
"vst1.f32 {d20-d21}, [%3 :128]! \n"
"vst1.f32 {d22-d23}, [%4 :128]! \n"
"bne 0b \n"
"sub %5, %5, #16 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(output2_tm), // %3
"=r"(output3_tm), // %4
"=r"(r0), // %5
"=r"(r1) // %6
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(output2_tm),
"4"(output3_tm),
"5"(r0),
"6"(r1),
"w"(_k00), // %14
"w"(_k01), // %15
"w"(_k10), // %16
"w"(_k11), // %17
"w"(_k20), // %18
"w"(_k21), // %19
"w"(_k30), // %20
"w"(_k31) // %21
: "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
#if __ARM_NEON
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v20.4s}, [%4], #16 \n"
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v16.4s}, [%0] \n"
"fmla v16.4s, v20.4s, %12.4s \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v17.4s}, [%1] \n"
"fmla v17.4s, v20.4s, %14.4s \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v18.4s}, [%2] \n"
"fmla v18.4s, v20.4s, %16.4s \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v19.4s}, [%3] \n"
"fmla v19.4s, v20.4s, %18.4s \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v21.4s}, [%5], #16 \n"
"fmla v16.4s, v21.4s, %13.4s \n"
"fmla v17.4s, v21.4s, %15.4s \n"
"fmla v18.4s, v21.4s, %17.4s \n"
"fmla v19.4s, v21.4s, %19.4s \n"
"st1 {v16.4s}, [%0], #16 \n"
"st1 {v17.4s}, [%1], #16 \n"
"st1 {v18.4s}, [%2], #16 \n"
"st1 {v19.4s}, [%3], #16 \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(r0), // %4
"=r"(r1) // %5
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(r0),
"5"(r1),
"w"(_k00), // %12
"w"(_k01), // %13
"w"(_k10), // %14
"w"(_k11), // %15
"w"(_k20), // %16
"w"(_k21), // %17
"w"(_k30), // %18
"w"(_k31) // %19
: "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21"
);
#else
asm volatile(
"pld [%4, #128] \n"
"vld1.f32 {d24-d25}, [%4 :128]! \n"// q12 = _r0
"pld [%0, #128] \n"
"vld1.f32 {d16-d17}, [%0 :128] \n"// q8 = _output0_tm
"vmla.f32 q8, q12, %q12 \n"
"pld [%1, #128] \n"
"vld1.f32 {d18-d19}, [%1 :128] \n"// q9 = _output1_tm
"vmla.f32 q9, q12, %q14 \n"
"pld [%2, #128] \n"
"vld1.f32 {d20-d21}, [%2 :128] \n"// q10 = _output2_tm
"vmla.f32 q10, q12, %q16 \n"
"pld [%3, #128] \n"
"vld1.f32 {d22-d23}, [%3 :128] \n"// q11 = _output3_tm
"vmla.f32 q11, q12, %q18 \n"
"pld [%5, #128] \n"
"vld1.f32 {d26-d27}, [%5 :128]! \n"// q13 = _r1
"vmla.f32 q8, q13, %q13 \n"
"vmla.f32 q9, q13, %q15 \n"
"vmla.f32 q10, q13, %q17 \n"
"vmla.f32 q11, q13, %q19 \n"
"vst1.f32 {d16-d17}, [%0 :128]! \n"
"vst1.f32 {d18-d19}, [%1 :128]! \n"
"vst1.f32 {d20-d21}, [%2 :128]! \n"
"vst1.f32 {d22-d23}, [%3 :128]! \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(r0), // %4
"=r"(r1) // %5
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(r0),
"5"(r1),
"w"(_k00), // %12
"w"(_k01), // %13
"w"(_k10), // %14
"w"(_k11), // %15
"w"(_k20), // %16
"w"(_k21), // %17
"w"(_k30), // %18
"w"(_k31) // %19
: "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13"
);
#endif // __aarch64__
#else
for (int m=0; m<4; m++)
{
output0_tm[m] += r0[m] * k00[m];
output0_tm[m] += r1[m] * k01[m];
output1_tm[m] += r0[m] * k10[m];
output1_tm[m] += r1[m] * k11[m];
output2_tm[m] += r0[m] * k20[m];
output2_tm[m] += r1[m] * k21[m];
output3_tm[m] += r0[m] * k30[m];
output3_tm[m] += r1[m] * k31[m];
}
r0 += 4;
r1 += 4;
output0_tm += 4;
output1_tm += 4;
output2_tm += 4;
output3_tm += 4;
#endif // __ARM_NEON
}
#if __ARM_NEON
#if __aarch64__
k00 += 4;
k01 += 4;
k10 += 4;
k11 += 4;
k20 += 4;
k21 += 4;
k30 += 4;
k31 += 4;
#endif // __aarch64__
#else
k00 += 4;
k01 += 4;
k10 += 4;
k11 += 4;
k20 += 4;
k21 += 4;
k30 += 4;
k31 += 4;
#endif // __ARM_NEON
}
}
for (; q<inch; q++)
{
const float* r0 = bottom_blob_tm.channel(q);
const float* k00 = kernel0_tm.row(q);
const float* k10 = kernel1_tm.row(q);
const float* k20 = kernel2_tm.row(q);
const float* k30 = kernel3_tm.row(q);
float* output0_tm = out0_tm;
float* output1_tm = out1_tm;
float* output2_tm = out2_tm;
float* output3_tm = out3_tm;
// Accumulate one remaining input channel into four output channels.
// The 8x8 transform block is processed as 16 passes of 4 coefficients:
// each pass loads one 4-float kernel vector per output channel
// (k00/k10/k20/k30 advance by 4 at the end of the pass) and sweeps all
// tiles for those coefficients.
for (int r=0; r<16; r++)
{
#if __ARM_NEON
float32x4_t _k00 = vld1q_f32(k00);
float32x4_t _k10 = vld1q_f32(k10);
float32x4_t _k20 = vld1q_f32(k20);
float32x4_t _k30 = vld1q_f32(k30);
#endif // __ARM_NEON
// tile
int remain = tiles;
for (; remain>0; remain--)
{
#if __ARM_NEON
#if __aarch64__
// output{0..3}_tm[0..3] += r0[0..3] * _k{00,10,20,30}, then advance
// r0 and the four output pointers by 4 floats.
asm volatile(
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v20.4s}, [%4], #16 \n"
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v16.4s}, [%0] \n"
"fmla v16.4s, v20.4s, %12.4s \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v17.4s}, [%1] \n"
"fmla v17.4s, v20.4s, %14.4s \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v18.4s}, [%2] \n"
"fmla v18.4s, v20.4s, %16.4s \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v19.4s}, [%3] \n"
"fmla v19.4s, v20.4s, %18.4s \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v21.4s}, [%5], #16 \n"
"fmla v16.4s, v21.4s, %13.4s \n"
"fmla v17.4s, v21.4s, %15.4s \n"
"fmla v18.4s, v21.4s, %17.4s \n"
"fmla v19.4s, v21.4s, %19.4s \n"
"st1 {v16.4s}, [%0], #16 \n"
"st1 {v17.4s}, [%1], #16 \n"
"st1 {v18.4s}, [%2], #16 \n"
"st1 {v19.4s}, [%3], #16 \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(r0), // %4
"=r"(r1) // %5
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(r0),
"5"(r1),
"w"(_k00), // %12
"w"(_k01), // %13
"w"(_k10), // %14
"w"(_k11), // %15
"w"(_k20), // %16
"w"(_k21), // %17
"w"(_k30), // %18
"w"(_k31) // %19
: "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21"
);
#else
// armv7 variant of the same multiply-accumulate, using q registers.
asm volatile(
"pld [%4, #128] \n"
"vld1.f32 {d24-d25}, [%4 :128]! \n"// q12 = _r0
"pld [%0, #128] \n"
"vld1.f32 {d16-d17}, [%0 :128] \n"// q8 = _output0_tm
"vmla.f32 q8, q12, %q12 \n"
"pld [%1, #128] \n"
"vld1.f32 {d18-d19}, [%1 :128] \n"// q9 = _output1_tm
"vmla.f32 q9, q12, %q14 \n"
"pld [%2, #128] \n"
"vld1.f32 {d20-d21}, [%2 :128] \n"// q10 = _output2_tm
"vmla.f32 q10, q12, %q16 \n"
"pld [%3, #128] \n"
"vld1.f32 {d22-d23}, [%3 :128] \n"// q11 = _output3_tm
"vmla.f32 q11, q12, %q18 \n"
"pld [%5, #128] \n"
"vld1.f32 {d26-d27}, [%5 :128]! \n"// q13 = _r1
"vmla.f32 q8, q13, %q13 \n"
"vmla.f32 q9, q13, %q15 \n"
"vmla.f32 q10, q13, %q17 \n"
"vmla.f32 q11, q13, %q19 \n"
"vst1.f32 {d16-d17}, [%0 :128]! \n"
"vst1.f32 {d18-d19}, [%1 :128]! \n"
"vst1.f32 {d20-d21}, [%2 :128]! \n"
"vst1.f32 {d22-d23}, [%3 :128]! \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(r0), // %4
"=r"(r1) // %5
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(r0),
"5"(r1),
"w"(_k00), // %12
"w"(_k01), // %13
"w"(_k10), // %14
"w"(_k11), // %15
"w"(_k20), // %16
"w"(_k21), // %17
"w"(_k30), // %18
"w"(_k31) // %19
: "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13"
);
#endif // __aarch64__
#else
// Scalar fallback: same element-wise multiply-accumulate.
for (int m=0; m<4; m++)
{
output0_tm[m] += r0[m] * k00[m];
output1_tm[m] += r0[m] * k10[m];
output2_tm[m] += r0[m] * k20[m];
output3_tm[m] += r0[m] * k30[m];
}
r0 += 4;
output0_tm += 4;
output1_tm += 4;
output2_tm += 4;
output3_tm += 4;
#endif // __ARM_NEON
}
// advance to the next group of 4 transform coefficients
k00 += 4;
k10 += 4;
k20 += 4;
k30 += 4;
}
}
}
#pragma omp parallel for
for (int p = remain_outch_start; p<outch; p++)
{
Mat out0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p);
out0_tm.fill(0.f);
int q = 0;
for (; q<inch; q++)
{
const float* r0 = bottom_blob_tm.channel(q);
const float* k00 = kernel0_tm.row(q);
float* output0_tm = out0_tm;
// Tail path: one input channel into a single output channel.
// As above, the 8x8 transform block is walked as 16 passes of 4
// coefficients (k00 advances by 4 per pass), sweeping all tiles each pass.
for (int r=0; r<16; r++)
{
#if __ARM_NEON
float32x4_t _k00 = vld1q_f32(k00);
#endif // __ARM_NEON
// tile
for (int i=0; i<tiles; i++)
{
#if __ARM_NEON
#if __aarch64__
// output0_tm[0..3] += r0[0..3] * _k00; both pointers advance 4 floats.
asm volatile(
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v17.4s}, [%1], #16 \n"
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v16.4s}, [%0] \n"
"fmla v16.4s, v17.4s, %4.4s \n"
"st1 {v16.4s}, [%0], #16 \n"
: "=r"(output0_tm), // %0
"=r"(r0) // %1
: "0"(output0_tm),
"1"(r0),
"w"(_k00) // %4
: "cc", "memory", "v16", "v17"
);
#else
// armv7 variant of the same single multiply-accumulate.
asm volatile(
"pld [%1, #128] \n"
"vld1.f32 {d18-d19}, [%1 :128]! \n"// q9 = _r0
"pld [%0, #128] \n"
"vld1.f32 {d16-d17}, [%0 :128] \n"// q8 = _output0_tm
"vmla.f32 q8, q9, %q4 \n"
"vst1.f32 {d16-d17}, [%0 :128]! \n"
: "=r"(output0_tm), // %0
"=r"(r0) // %1
: "0"(output0_tm),
"1"(r0),
"w"(_k00) // %4
: "cc", "memory", "q8", "q9"
);
#endif // __aarch64__
#else
// Scalar fallback.
for (int m=0; m<4; m++)
{
output0_tm[m] += r0[m] * k00[m];
}
r0 += 4;
output0_tm += 4;
#endif // __ARM_NEON
}
// next group of 4 transform coefficients
k00 += 4;
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
top_blob_bordered.create(outw, outh, outch);
{
// const float otm[6][8] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f}
// };
// 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32
// 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
// 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
// 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
// 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
// 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = w_tm/8 * h_tm/8;
#pragma omp parallel for
// Inverse Winograd output transform: per output channel, turn each 8x8
// transformed block back into a 6x6 spatial output tile and add the bias
// (the otm matrix is spelled out in the comment earlier in this function).
for (int p = 0; p<outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob_bordered.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
// tmp holds the intermediate 6x8 result after transforming the 8 rows.
float tmp[6][8];
// tile
for (int i=0; i<outh/6; i++)
{
for (int j=0; j<outw/6; j++)
{
// Each 8-wide transform row of this tile is stored as two 4-float
// rows: *_0 holds columns 0-3, *_4 holds columns 4-7, with
// consecutive rows spaced `tiles` rows apart in out0_tm.
const float* output0_tm0_0 = out0_tm.row(i * w_tm/8 + j);
const float* output0_tm0_4 = out0_tm.row(i * w_tm/8 + j + tiles);
const float* output0_tm1_0 = out0_tm.row(i * w_tm/8 + j + tiles * 2);
const float* output0_tm1_4 = out0_tm.row(i * w_tm/8 + j + tiles * 3);
const float* output0_tm2_0 = out0_tm.row(i * w_tm/8 + j + tiles * 4);
const float* output0_tm2_4 = out0_tm.row(i * w_tm/8 + j + tiles * 5);
const float* output0_tm3_0 = out0_tm.row(i * w_tm/8 + j + tiles * 6);
const float* output0_tm3_4 = out0_tm.row(i * w_tm/8 + j + tiles * 7);
const float* output0_tm4_0 = out0_tm.row(i * w_tm/8 + j + tiles * 8);
const float* output0_tm4_4 = out0_tm.row(i * w_tm/8 + j + tiles * 9);
const float* output0_tm5_0 = out0_tm.row(i * w_tm/8 + j + tiles * 10);
const float* output0_tm5_4 = out0_tm.row(i * w_tm/8 + j + tiles * 11);
const float* output0_tm6_0 = out0_tm.row(i * w_tm/8 + j + tiles * 12);
const float* output0_tm6_4 = out0_tm.row(i * w_tm/8 + j + tiles * 13);
const float* output0_tm7_0 = out0_tm.row(i * w_tm/8 + j + tiles * 14);
const float* output0_tm7_4 = out0_tm.row(i * w_tm/8 + j + tiles * 15);
// top-left corner of the 6x6 destination tile
float* output0 = out0.row(i * 6) + j * 6;
const float* output0_tms_0[8] = { output0_tm0_0, output0_tm1_0, output0_tm2_0, output0_tm3_0, output0_tm4_0, output0_tm5_0, output0_tm6_0, output0_tm7_0 };
const float* output0_tms_4[8] = { output0_tm0_4, output0_tm1_4, output0_tm2_4, output0_tm3_4, output0_tm4_4, output0_tm5_4, output0_tm6_4, output0_tm7_4 };
// First pass: transform each of the 8 rows (r0..r7 across the two
// halves) down to 6 values, written column-wise into tmp.
for (int m=0; m<8; m++)
{
const float* output0_tm_0 = output0_tms_0[m];
const float* output0_tm_4 = output0_tms_4[m];
// pairwise sums/differences of (r1,r2), (r3,r4), (r5,r6);
// note r3 is *_0[3] and r4 is *_4[0] (the split point).
float tmp024a = output0_tm_0[1] + output0_tm_0[2];
float tmp135a = output0_tm_0[1] - output0_tm_0[2];
float tmp024b = output0_tm_0[3] + output0_tm_4[0];
float tmp135b = output0_tm_0[3] - output0_tm_4[0];
float tmp024c = output0_tm_4[1] + output0_tm_4[2];
float tmp135c = output0_tm_4[1] - output0_tm_4[2];
// "x + x" is "* 2" written as an add; r0 enters only row 0 and
// r7 only row 5, matching the otm matrix.
tmp[0][m] = output0_tm_0[0] + tmp024a + tmp024b + tmp024c * 32;
tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8;
tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c;
tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16;
tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4;
tmp[5][m] = output0_tm_4[3] + tmp135a + tmp135b * 32 + tmp135c;
}
// Second pass: same transform along the other axis, adding the
// channel bias exactly once per output element.
for (int m=0; m<6; m++)
{
const float* tmp0 = tmp[m];
float tmp024a = tmp0[1] + tmp0[2];
float tmp135a = tmp0[1] - tmp0[2];
float tmp024b = tmp0[3] + tmp0[4];
float tmp135b = tmp0[3] - tmp0[4];
float tmp024c = tmp0[5] + tmp0[6];
float tmp135c = tmp0[5] - tmp0[6];
output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32;
output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8;
output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c;
output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16;
output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4;
output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c;
// next output row of this 6x6 tile
output0 += outw;
}
}
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w);
}
static void conv3x3s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const int tailstep = w - 2*outw + w;
const float* kernel = _kernel;
const float* bias = _bias;
#pragma omp parallel for
for (int p=0; p<outch; p++)
{
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
out.fill(bias0);
const float* kernel0 = kernel + p*inch*9;
for (int q=0; q<inch; q++)
{
float* outptr = out;
const float* img0 = bottom_blob.channel(q);
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w*2;
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
#if __ARM_NEON
float32x4_t _k0123 = vld1q_f32(k0);
float32x4_t _k3456 = vld1q_f32(k1);
float32x4_t _k6789 = vld1q_f32(k2);
#endif // __ARM_NEON
int i = 0;
for (; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 2;
int remain = outw & 3;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
for (; nn>0; nn--)
{
float32x4_t _outp = vld1q_f32(outptr);
float32x4x2_t _r0 = vld2q_f32(r0);
float32x4x2_t _r0n = vld2q_f32(r0+8);
float32x4_t _r00 = _r0.val[0];// 0 2 4 6
float32x4_t _r01 = _r0.val[1];// 1 3 5 7
float32x4_t _r02 = vextq_f32(_r00, _r0n.val[0], 1);// 2 4 6 8
_outp = vfmaq_laneq_f32(_outp, _r00, _k0123, 0);
_outp = vfmaq_laneq_f32(_outp, _r01, _k0123, 1);
_outp = vfmaq_laneq_f32(_outp, _r02, _k0123, 2);
float32x4x2_t _r1 = vld2q_f32(r1);
float32x4x2_t _r1n = vld2q_f32(r1+8);
float32x4_t _r10 = _r1.val[0];
float32x4_t _r11 = _r1.val[1];
float32x4_t _r12 = vextq_f32(_r10, _r1n.val[0], 1);
_outp = vfmaq_laneq_f32(_outp, _r10, _k3456, 0);
_outp = vfmaq_laneq_f32(_outp, _r11, _k3456, 1);
_outp = vfmaq_laneq_f32(_outp, _r12, _k3456, 2);
float32x4x2_t _r2 = vld2q_f32(r2);
float32x4x2_t _r2n = vld2q_f32(r2+8);
float32x4_t _r20 = _r2.val[0];
float32x4_t _r21 = _r2.val[1];
float32x4_t _r22 = vextq_f32(_r20, _r2n.val[0], 1);
_outp = vfmaq_laneq_f32(_outp, _r20, _k6789, 0);
_outp = vfmaq_laneq_f32(_outp, _r21, _k6789, 1);
_outp = vfmaq_laneq_f32(_outp, _r22, _k6789, 2);
vst1q_f32(outptr, _outp);
r0 += 8;
r1 += 8;
r2 += 8;
outptr += 4;
}
#else
if (nn > 0)
{
asm volatile(
"pld [%2, #256] \n"
"vld2.f32 {d4-d7}, [%2]! \n"
"0: \n"
"pld [%1, #128] \n"
"vld1.f32 {d0-d1}, [%1] \n"
"vmla.f32 q0, q2, %e10[0] \n"
"vmul.f32 q10, q3, %e10[1] \n"
"pld [%2, #128] \n"
"vld2.f32 {d16-d17}, [%2] \n"
"vext.32 q1, q2, q8, #1 \n"
"vmul.f32 q11, q1, %f10[0] \n"
"pld [%3, #256] \n"
"vld2.f32 {d4-d7}, [%3]! \n"
"vmla.f32 q0, q2, %e11[0] \n"
"vmla.f32 q10, q3, %e11[1] \n"
"pld [%3, #128] \n"
"vld2.f32 {d16-d17}, [%3] \n"
"vext.32 q1, q2, q8, #1 \n"
"vmla.f32 q11, q1, %f11[0] \n"
"pld [%4, #256] \n"
"vld2.f32 {d4-d7}, [%4]! \n"
"vmla.f32 q0, q2, %e12[0] \n"
"vmla.f32 q10, q3, %e12[1] \n"
"pld [%4, #128] \n"
"vld2.f32 {d16-d17}, [%4] \n"
"vext.32 q1, q2, q8, #1 \n"
"vmla.f32 q11, q1, %f12[0] \n"
"pld [%2, #256] \n"
"vld2.f32 {d4-d7}, [%2]! \n"
"vadd.f32 q0, q0, q10 \n"
"vadd.f32 q0, q0, q11 \n"
"subs %0, #1 \n"
"vst1.f32 {d0-d1}, [%1]! \n"
"bne 0b \n"
"sub %2, #32 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k0123), // %10
"w"(_k3456), // %11
"w"(_k6789) // %12
: "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
#if __ARM_NEON
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _sum = vmulq_f32(_r00, _k0123);
_sum = vmlaq_f32(_sum, _r10, _k3456);
_sum = vmlaq_f32(_sum, _r20, _k6789);
_sum = vsetq_lane_f32(*outptr, _sum, 3);
#if __aarch64__
*outptr = vaddvq_f32(_sum);
#else
float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
_ss = vpadd_f32(_ss, _ss);
*outptr = vget_lane_f32(_ss, 0);
#endif // __aarch64__
#else
float sum = 0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
*outptr += sum;
#endif // __ARM_NEON
r0 += 2;
r1 += 2;
r2 += 2;
outptr++;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
kernel0 += 9;
}
}
}
|
strassen.c | /**********************************************************************************************/
/* This program is part of the Barcelona OpenMP Tasks Suite */
/* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */
/* Copyright (C) 2009 Universitat Politecnica de Catalunya */
/* */
/**********************************************************************************************/
/*
* Copyright (c) 1996 Massachusetts Institute of Technology
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to use, copy, modify, and distribute the Software without
* restriction, provided the Software, including any modified copies made
* under this license, is not distributed for a fee, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE MASSACHUSETTS INSTITUTE OF TECHNOLOGY BE LIABLE
* FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* Except as contained in this notice, the name of the Massachusetts
* Institute of Technology shall not be used in advertising or otherwise
* to promote the sale, use or other dealings in this Software without
* prior written authorization from the Massachusetts Institute of
* Technology.
*
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "app-desc.h"
#include "bots.h"
#include "strassen.h"
/***********************************************************************
* Naive sequential algorithm, for comparison purposes
**********************************************************************/
/* Reference implementation: plain O(n^3) triple-loop product C = A x B.
 * n is the logical matrix order; an/bn/cn are the row strides (in elements)
 * of A, B and C respectively. Used for comparison against the fast kernels. */
void matrixmul(int n, REAL *A, int an, REAL *B, int bn, REAL *C, int cn)
{
  int row, col, idx;
  REAL acc;
  for (row = 0; row < n; ++row)
  {
    for (col = 0; col < n; ++col)
    {
      /* Dot product of row `row` of A with column `col` of B. */
      acc = 0.0;
      for (idx = 0; idx < n; ++idx)
        acc += ELEM(A, an, row, idx) * ELEM(B, bn, idx, col);
      ELEM(C, cn, row, col) = acc;
    }
  }
}
/*****************************************************************************
**
** FastNaiveMatrixMultiply
**
** For small to medium sized matrices A, B, and C of size
** MatrixSize * MatrixSize this function performs the operation
** C = A x B efficiently.
**
** Note MatrixSize must be divisible by 8.
**
** INPUT:
** C = (*C WRITE) Address of top left element of matrix C.
** A = (*A IS READ ONLY) Address of top left element of matrix A.
** B = (*B IS READ ONLY) Address of top left element of matrix B.
** MatrixSize = Size of matrices (for n*n matrix, MatrixSize = n)
** RowWidthA = Number of elements in memory between A[x,y] and A[x,y+1]
** RowWidthB = Number of elements in memory between B[x,y] and B[x,y+1]
** RowWidthC = Number of elements in memory between C[x,y] and C[x,y+1]
**
** OUTPUT:
** C = (*C WRITE) Matrix C contains A x B. (Initial value of *C undefined.)
**
*****************************************************************************/
void FastNaiveMatrixMultiply(REAL *C, REAL *A, REAL *B, unsigned MatrixSize,
     unsigned RowWidthC, unsigned RowWidthA, unsigned RowWidthB)
{
  /* C = A x B, unrolled 8 elements of C at a time; MatrixSize must be
   * divisible by 8. Pointer arithmetic below is done in bytes via PTR
   * casts, so the << 3 shifts assume sizeof(REAL) == 8. */
  /* Assumes size of real is 8 bytes */
  PTR RowWidthBInBytes = RowWidthB << 3;
  PTR RowWidthAInBytes = RowWidthA << 3;
  PTR MatrixWidthInBytes = MatrixSize << 3;
  /* Bytes between the end of a MatrixSize-wide C row and the next row. */
  PTR RowIncrementC = ( RowWidthC - MatrixSize) << 3;
  unsigned Horizontal, Vertical;
  REAL *ARowStart = A;
  for (Vertical = 0; Vertical < MatrixSize; Vertical++) {
    for (Horizontal = 0; Horizontal < MatrixSize; Horizontal += 8) {
      /* Compute 8 adjacent output elements of row `Vertical` at once by
       * walking one row of A against 8 neighbouring columns of B. */
      REAL *BColumnStart = B + Horizontal;
      /* First term (k == 0) initializes the 8 accumulators. */
      REAL FirstARowValue = *ARowStart++;
      REAL Sum0 = FirstARowValue * (*BColumnStart);
      REAL Sum1 = FirstARowValue * (*(BColumnStart+1));
      REAL Sum2 = FirstARowValue * (*(BColumnStart+2));
      REAL Sum3 = FirstARowValue * (*(BColumnStart+3));
      REAL Sum4 = FirstARowValue * (*(BColumnStart+4));
      REAL Sum5 = FirstARowValue * (*(BColumnStart+5));
      REAL Sum6 = FirstARowValue * (*(BColumnStart+6));
      REAL Sum7 = FirstARowValue * (*(BColumnStart+7));
      unsigned Products;
      /* Remaining MatrixSize-1 terms of each of the 8 dot products. */
      for (Products = 1; Products < MatrixSize; Products++) {
        REAL ARowValue = *ARowStart++;
        /* Advance B to the next row (stride RowWidthB elements). */
        BColumnStart = (REAL*) (((PTR) BColumnStart) + RowWidthBInBytes);
        Sum0 += ARowValue * (*BColumnStart);
        Sum1 += ARowValue * (*(BColumnStart+1));
        Sum2 += ARowValue * (*(BColumnStart+2));
        Sum3 += ARowValue * (*(BColumnStart+3));
        Sum4 += ARowValue * (*(BColumnStart+4));
        Sum5 += ARowValue * (*(BColumnStart+5));
        Sum6 += ARowValue * (*(BColumnStart+6));
        Sum7 += ARowValue * (*(BColumnStart+7));
      }
      /* Rewind A to the start of the current row for the next 8 columns. */
      ARowStart = (REAL*) ( ((PTR) ARowStart) - MatrixWidthInBytes);
      *(C) = Sum0;
      *(C+1) = Sum1;
      *(C+2) = Sum2;
      *(C+3) = Sum3;
      *(C+4) = Sum4;
      *(C+5) = Sum5;
      *(C+6) = Sum6;
      *(C+7) = Sum7;
      C+=8;
    }
    /* Next row of A; skip C's stride padding past the MatrixSize columns. */
    ARowStart = (REAL*) ( ((PTR) ARowStart) + RowWidthAInBytes );
    C = (REAL*) ( ((PTR) C) + RowIncrementC );
  }
}
/*****************************************************************************
**
** FastAdditiveNaiveMatrixMultiply
**
** For small to medium sized matrices A, B, and C of size
** MatrixSize * MatrixSize this function performs the operation
** C += A x B efficiently.
**
** Note MatrixSize must be divisible by 8.
**
** INPUT:
** C = (*C READ/WRITE) Address of top left element of matrix C.
** A = (*A IS READ ONLY) Address of top left element of matrix A.
** B = (*B IS READ ONLY) Address of top left element of matrix B.
** MatrixSize = Size of matrices (for n*n matrix, MatrixSize = n)
** RowWidthA = Number of elements in memory between A[x,y] and A[x,y+1]
** RowWidthB = Number of elements in memory between B[x,y] and B[x,y+1]
** RowWidthC = Number of elements in memory between C[x,y] and C[x,y+1]
**
** OUTPUT:
** C = (*C READ/WRITE) Matrix C contains C + A x B.
**
*****************************************************************************/
void FastAdditiveNaiveMatrixMultiply(REAL *C, REAL *A, REAL *B, unsigned MatrixSize,
     unsigned RowWidthC, unsigned RowWidthA, unsigned RowWidthB)
{
  /* C += A x B, unrolled 8 elements of C at a time; MatrixSize must be
   * divisible by 8. Unlike FastNaiveMatrixMultiply, the accumulators are
   * seeded from the existing contents of C and the inner loop runs the
   * full MatrixSize iterations starting at 0. Byte-based pointer
   * arithmetic assumes sizeof(REAL) == 8 (the << 3 shifts). */
  /* Assumes size of real is 8 bytes */
  PTR RowWidthBInBytes = RowWidthB << 3;
  PTR RowWidthAInBytes = RowWidthA << 3;
  PTR MatrixWidthInBytes = MatrixSize << 3;
  /* Bytes between the end of a MatrixSize-wide C row and the next row. */
  PTR RowIncrementC = ( RowWidthC - MatrixSize) << 3;
  unsigned Horizontal, Vertical;
  REAL *ARowStart = A;
  for (Vertical = 0; Vertical < MatrixSize; Vertical++) {
    for (Horizontal = 0; Horizontal < MatrixSize; Horizontal += 8) {
      REAL *BColumnStart = B + Horizontal;
      /* Seed the 8 accumulators with the current values of C. */
      REAL Sum0 = *C;
      REAL Sum1 = *(C+1);
      REAL Sum2 = *(C+2);
      REAL Sum3 = *(C+3);
      REAL Sum4 = *(C+4);
      REAL Sum5 = *(C+5);
      REAL Sum6 = *(C+6);
      REAL Sum7 = *(C+7);
      unsigned Products;
      /* Accumulate all MatrixSize terms of each of the 8 dot products. */
      for (Products = 0; Products < MatrixSize; Products++) {
        REAL ARowValue = *ARowStart++;
        Sum0 += ARowValue * (*BColumnStart);
        Sum1 += ARowValue * (*(BColumnStart+1));
        Sum2 += ARowValue * (*(BColumnStart+2));
        Sum3 += ARowValue * (*(BColumnStart+3));
        Sum4 += ARowValue * (*(BColumnStart+4));
        Sum5 += ARowValue * (*(BColumnStart+5));
        Sum6 += ARowValue * (*(BColumnStart+6));
        Sum7 += ARowValue * (*(BColumnStart+7));
        /* Advance B to the next row (stride RowWidthB elements). */
        BColumnStart = (REAL*) (((PTR) BColumnStart) + RowWidthBInBytes);
      }
      /* Rewind A to the start of the current row for the next 8 columns. */
      ARowStart = (REAL*) ( ((PTR) ARowStart) - MatrixWidthInBytes);
      *(C) = Sum0;
      *(C+1) = Sum1;
      *(C+2) = Sum2;
      *(C+3) = Sum3;
      *(C+4) = Sum4;
      *(C+5) = Sum5;
      *(C+6) = Sum6;
      *(C+7) = Sum7;
      C+=8;
    }
    /* Next row of A; skip C's stride padding past the MatrixSize columns. */
    ARowStart = (REAL*) ( ((PTR) ARowStart) + RowWidthAInBytes );
    C = (REAL*) ( ((PTR) C) + RowIncrementC );
  }
}
/*****************************************************************************
**
** MultiplyByDivideAndConquer
**
** For medium to medium-large (would you like fries with that) sized
** matrices A, B, and C of size MatrixSize * MatrixSize this function
** efficiently performs the operation
** C = A x B (if AdditiveMode == 0)
** C += A x B (if AdditiveMode != 0)
**
** Note MatrixSize must be divisible by 16.
**
** INPUT:
** C = (*C READ/WRITE) Address of top left element of matrix C.
** A = (*A IS READ ONLY) Address of top left element of matrix A.
** B = (*B IS READ ONLY) Address of top left element of matrix B.
** MatrixSize = Size of matrices (for n*n matrix, MatrixSize = n)
** RowWidthA = Number of elements in memory between A[x,y] and A[x,y+1]
** RowWidthB = Number of elements in memory between B[x,y] and B[x,y+1]
** RowWidthC = Number of elements in memory between C[x,y] and C[x,y+1]
** AdditiveMode = 0 if we want C = A x B, otherwise we'll do C += A x B
**
** OUTPUT:
** C (+)= A x B. (+ if AdditiveMode != 0)
**
*****************************************************************************/
/* Recursive blocked matrix multiply for medium-sized matrices:
 *   C  = A x B   when AdditiveMode == 0
 *   C += A x B   when AdditiveMode != 0
 * MatrixSize must be divisible by 16. Row widths are the memory strides
 * (in elements) of the respective matrices. Each matrix is split into four
 * quadrants; each C quadrant is the sum of two quadrant products. */
void MultiplyByDivideAndConquer(REAL *C, REAL *A, REAL *B,
				 unsigned MatrixSize,
				 unsigned RowWidthC,
				 unsigned RowWidthA,
				 unsigned RowWidthB,
				 int AdditiveMode
				)
{
  unsigned Half = MatrixSize >> 1;
  /* Quadrant origins. The top-left quadrant of each matrix coincides with
   * the matrix origin itself (A, B, C). */
  REAL *TopRightA = A + Half;
  REAL *BotLeftA  = A + RowWidthA * Half;
  REAL *BotRightA = BotLeftA + Half;
  REAL *TopRightB = B + Half;
  REAL *BotLeftB  = B + RowWidthB * Half;
  REAL *BotRightB = BotLeftB + Half;
  REAL *TopRightC = C + Half;
  REAL *BotLeftC  = C + RowWidthC * Half;
  REAL *BotRightC = BotLeftC + Half;

  if (Half > SizeAtWhichNaiveAlgorithmIsMoreEfficient) {
    /* Still large: recurse. First pass writes (or accumulates, per
     * AdditiveMode) one product into each C quadrant... */
    MultiplyByDivideAndConquer(C, A, B, Half,
				 RowWidthC, RowWidthA, RowWidthB,
				 AdditiveMode);
    MultiplyByDivideAndConquer(TopRightC, A, TopRightB, Half,
				 RowWidthC, RowWidthA, RowWidthB,
				 AdditiveMode);
    MultiplyByDivideAndConquer(BotRightC, BotLeftA, TopRightB, Half,
				 RowWidthC, RowWidthA, RowWidthB,
				 AdditiveMode);
    MultiplyByDivideAndConquer(BotLeftC, BotLeftA, B, Half,
				 RowWidthC, RowWidthA, RowWidthB,
				 AdditiveMode);
    /* ...second pass always accumulates the complementary product. */
    MultiplyByDivideAndConquer(C, TopRightA, BotLeftB, Half,
				 RowWidthC, RowWidthA, RowWidthB,
				 1);
    MultiplyByDivideAndConquer(TopRightC, TopRightA, BotRightB, Half,
				 RowWidthC, RowWidthA, RowWidthB,
				 1);
    MultiplyByDivideAndConquer(BotRightC, BotRightA, BotRightB, Half,
				 RowWidthC, RowWidthA, RowWidthB,
				 1);
    MultiplyByDivideAndConquer(BotLeftC, BotRightA, BotLeftB, Half,
				 RowWidthC, RowWidthA, RowWidthB,
				 1);
  } else {
    /* Small enough: use the unrolled naive kernels directly. */
    if (AdditiveMode) {
      FastAdditiveNaiveMatrixMultiply(TopRightC - Half, A, B, Half,
				 RowWidthC, RowWidthA, RowWidthB);
      FastAdditiveNaiveMatrixMultiply(TopRightC, A, TopRightB, Half,
				 RowWidthC, RowWidthA, RowWidthB);
      FastAdditiveNaiveMatrixMultiply(BotRightC, BotLeftA, TopRightB, Half,
				 RowWidthC, RowWidthA, RowWidthB);
      FastAdditiveNaiveMatrixMultiply(BotLeftC, BotLeftA, B, Half,
				 RowWidthC, RowWidthA, RowWidthB);
    } else {
      FastNaiveMatrixMultiply(C, A, B, Half,
				 RowWidthC, RowWidthA, RowWidthB);
      FastNaiveMatrixMultiply(TopRightC, A, TopRightB, Half,
				 RowWidthC, RowWidthA, RowWidthB);
      FastNaiveMatrixMultiply(BotRightC, BotLeftA, TopRightB, Half,
				 RowWidthC, RowWidthA, RowWidthB);
      FastNaiveMatrixMultiply(BotLeftC, BotLeftA, B, Half,
				 RowWidthC, RowWidthA, RowWidthB);
    }
    /* Second product for each quadrant is always accumulated on top. */
    FastAdditiveNaiveMatrixMultiply(C, TopRightA, BotLeftB, Half,
				 RowWidthC, RowWidthA, RowWidthB);
    FastAdditiveNaiveMatrixMultiply(TopRightC, TopRightA, BotRightB, Half,
				 RowWidthC, RowWidthA, RowWidthB);
    FastAdditiveNaiveMatrixMultiply(BotRightC, BotRightA, BotRightB, Half,
				 RowWidthC, RowWidthA, RowWidthB);
    FastAdditiveNaiveMatrixMultiply(BotLeftC, BotRightA, BotLeftB, Half,
				 RowWidthC, RowWidthA, RowWidthB);
  }
  return;
}
/*****************************************************************************
**
** OptimizedStrassenMultiply
**
** For large matrices A, B, and C of size MatrixSize * MatrixSize this
** function performs the operation C = A x B efficiently.
**
** INPUT:
** C = (*C WRITE) Address of top left element of matrix C.
** A = (*A IS READ ONLY) Address of top left element of matrix A.
** B = (*B IS READ ONLY) Address of top left element of matrix B.
** MatrixSize = Size of matrices (for n*n matrix, MatrixSize = n)
** RowWidthA = Number of elements in memory between A[x,y] and A[x,y+1]
** RowWidthB = Number of elements in memory between B[x,y] and B[x,y+1]
** RowWidthC = Number of elements in memory between C[x,y] and C[x,y+1]
**
** OUTPUT:
** C = (*C WRITE) Matrix C contains A x B. (Initial value of *C undefined.)
**
*****************************************************************************/
void OptimizedStrassenMultiply_seq(REAL *C, REAL *A, REAL *B, unsigned MatrixSize,
     unsigned RowWidthC, unsigned RowWidthA, unsigned RowWidthB, int Depth)
{
  /* Sequential Strassen multiply: C = A x B. Below bots_app_cutoff_value the
   * plain divide-and-conquer kernel is used instead; above it, seven half-size
   * products are computed recursively using scratch matrices carved out of a
   * single heap allocation, then combined in one final pass over C. */
  unsigned QuadrantSize = MatrixSize >> 1; /* MatrixSize / 2 */
  /* Each scratch quadrant is padded by 32 bytes so it can be placed on a
   * 32-byte cache-line boundary. */
  unsigned QuadrantSizeInBytes = sizeof(REAL) * QuadrantSize * QuadrantSize
                                 + 32;
  unsigned Column, Row;
  
  /************************************************************************
  ** For each matrix A, B, and C, we'll want pointers to each quadrant
  ** in the matrix. These quadrants will be addressed as follows:
  **  --        --
  **  | A11  A12 |
  **  |          |
  **  | A21  A22 |
  **  --        --
  ************************************************************************/
  REAL /* *A11, *B11, *C11, */ *A12, *B12, *C12,
       *A21, *B21, *C21, *A22, *B22, *C22;
  /* Scratch matrices: S1-S8 hold quadrant sums/differences, M2 and M5 hold
   * two of the Strassen products, T1sMULT holds the S2*S6 product. The
   * T2 intermediate is built in place inside C22. */
  REAL *S1,*S2,*S3,*S4,*S5,*S6,*S7,*S8,*M2,*M5,*T1sMULT;
  #define T2sMULT C22
  #define NumberOfVariables 11
  /* Byte offsets used to walk the scratch matrices (TempMatrixOffset) and
   * the A/B quadrants (MatrixOffsetA/B) in lockstep. */
  PTR TempMatrixOffset = 0;
  PTR MatrixOffsetA = 0;
  PTR MatrixOffsetB = 0;
  char *Heap;
  void *StartHeap;
  /* Distance between the end of a matrix row and the start of the next row */
  PTR RowIncrementA = ( RowWidthA - QuadrantSize ) << 3;
  PTR RowIncrementB = ( RowWidthB - QuadrantSize ) << 3;
  PTR RowIncrementC = ( RowWidthC - QuadrantSize ) << 3;
  /* Recursion base case: below the application cutoff, Strassen overhead is
   * not worth it; fall back to the blocked naive multiply. */
  if (MatrixSize <= bots_app_cutoff_value) {
    MultiplyByDivideAndConquer(C, A, B, MatrixSize, RowWidthC, RowWidthA, RowWidthB, 0);
    return;
  }
  /* Initialize quadrant matrices */
  #define A11 A
  #define B11 B
  #define C11 C
  A12 = A11 + QuadrantSize;
  B12 = B11 + QuadrantSize;
  C12 = C11 + QuadrantSize;
  A21 = A + (RowWidthA * QuadrantSize);
  B21 = B + (RowWidthB * QuadrantSize);
  C21 = C + (RowWidthC * QuadrantSize);
  A22 = A21 + QuadrantSize;
  B22 = B21 + QuadrantSize;
  C22 = C21 + QuadrantSize;
  /* Allocate Heap Space Here */
  /* NOTE(review): malloc result is not checked for NULL before use. */
  StartHeap = malloc(QuadrantSizeInBytes * NumberOfVariables);
  Heap = (char*)StartHeap;
  /* ensure that heap is on cache boundary (32-byte aligned) */
  if ( ((PTR) Heap) & 31)
     Heap = (char*) ( ((PTR) Heap) + 32 - ( ((PTR) Heap) & 31) );
  /* Distribute the heap space over the variables */
  S1 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  S2 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  S3 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  S4 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  S5 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  S6 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  S7 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  S8 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  M2 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  M5 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  T1sMULT = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  /***************************************************************************
  ** Phase 1: build the S matrices element by element.
  ** Step through all columns row by row (vertically)
  ** (jumps in memory by RowWidth => bad locality)
  ** (but we want the best locality on the innermost loop)
  ***************************************************************************/
  for (Row = 0; Row < QuadrantSize; Row++) {
    /*************************************************************************
    ** Step through each row horizontally (addressing elements in each column)
    ** (jumps linearly through memory => good locality)
    *************************************************************************/
    for (Column = 0; Column < QuadrantSize; Column++) {
      /***********************************************************
      ** Within this loop, the following holds for MatrixOffset:
      ** MatrixOffset = (Row * RowWidth) + Column
      ** (note: that the unit of the offset is number of reals)
      ***********************************************************/
      /* Element of Global Matrix, such as A, B, C */
      #define E(Matrix)   (* (REAL*) ( ((PTR) Matrix) + TempMatrixOffset ) )
      #define EA(Matrix)  (* (REAL*) ( ((PTR) Matrix) + MatrixOffsetA ) )
      #define EB(Matrix)  (* (REAL*) ( ((PTR) Matrix) + MatrixOffsetB ) )
      /* FIXME - may pay to expand these out - got higher speed-ups below */
      /* S4 = A12 - ( S2 = ( S1 = A21 + A22 ) - A11 ) */
      E(S4) = EA(A12) - ( E(S2) = ( E(S1) = EA(A21) + EA(A22) ) - EA(A11) );
      /* S8 = (S6 = B22 - ( S5 = B12 - B11 ) ) - B21 */
      E(S8) = ( E(S6) = EB(B22) - ( E(S5) = EB(B12) - EB(B11) ) ) - EB(B21);
      /* S3 = A11 - A21 */
      E(S3) = EA(A11) - EA(A21);
      /* S7 = B22 - B12 */
      E(S7) = EB(B22) - EB(B12);
      TempMatrixOffset += sizeof(REAL);
      MatrixOffsetA += sizeof(REAL);
      MatrixOffsetB += sizeof(REAL);
    } /* end column loop */
    /* Skip the stride padding of A and B; the scratch matrices are dense
     * (stride == QuadrantSize) so TempMatrixOffset needs no adjustment. */
    MatrixOffsetA += RowIncrementA;
    MatrixOffsetB += RowIncrementB;
  } /* end row loop */
  /* Phase 2: the seven recursive half-size products. Scratch products use
   * dense strides (QuadrantSize); products written into C quadrants keep
   * RowWidthC. */
  /* M2 = A11 x B11 */
  OptimizedStrassenMultiply_seq(M2, A11, B11, QuadrantSize, QuadrantSize, RowWidthA, RowWidthB, Depth+1);
  /* M5 = S1 * S5 */
  OptimizedStrassenMultiply_seq(M5, S1, S5, QuadrantSize, QuadrantSize, QuadrantSize, QuadrantSize, Depth+1);
  /* Step 1 of T1 = S2 x S6 + M2 */
  OptimizedStrassenMultiply_seq(T1sMULT, S2, S6, QuadrantSize, QuadrantSize, QuadrantSize, QuadrantSize, Depth+1);
  /* Step 1 of T2 = T1 + S3 x S7 */
  OptimizedStrassenMultiply_seq(C22, S3, S7, QuadrantSize, RowWidthC /*FIXME*/, QuadrantSize, QuadrantSize, Depth+1);
  /* Step 1 of C11 = M2 + A12 * B21 */
  OptimizedStrassenMultiply_seq(C11, A12, B21, QuadrantSize, RowWidthC, RowWidthA, RowWidthB, Depth+1);
  /* Step 1 of C12 = S4 x B22 + T1 + M5 */
  OptimizedStrassenMultiply_seq(C12, S4, B22, QuadrantSize, RowWidthC, QuadrantSize, RowWidthB, Depth+1);
  /* Step 1 of C21 = T2 - A22 * S8 */
  OptimizedStrassenMultiply_seq(C21, A22, S8, QuadrantSize, RowWidthC, RowWidthA, QuadrantSize, Depth+1);
  /***************************************************************************
  ** Phase 3: combine. With T1 = T1sMULT + M2 and T2 = (old C22) + T1:
  **   C11 += M2;  C12 += M5 + T1;  C22 = M5 + T2;  C21 = T2 - (old C21).
  ** Processed four elements per iteration (QuadrantSize is a multiple of 4
  ** at this level since MatrixSize > cutoff).
  ** Step through all columns row by row (vertically)
  ** (jumps in memory by RowWidth => bad locality)
  ** (but we want the best locality on the innermost loop)
  ***************************************************************************/
  for (Row = 0; Row < QuadrantSize; Row++) {
    /*************************************************************************
    ** Step through each row horizontally (addressing elements in each column)
    ** (jumps linearly through memory => good locality)
    *************************************************************************/
    for (Column = 0; Column < QuadrantSize; Column += 4) {
      REAL LocalM5_0 = *(M5);
      REAL LocalM5_1 = *(M5+1);
      REAL LocalM5_2 = *(M5+2);
      REAL LocalM5_3 = *(M5+3);
      REAL LocalM2_0 = *(M2);
      REAL LocalM2_1 = *(M2+1);
      REAL LocalM2_2 = *(M2+2);
      REAL LocalM2_3 = *(M2+3);
      REAL T1_0 = *(T1sMULT) + LocalM2_0;
      REAL T1_1 = *(T1sMULT+1) + LocalM2_1;
      REAL T1_2 = *(T1sMULT+2) + LocalM2_2;
      REAL T1_3 = *(T1sMULT+3) + LocalM2_3;
      /* C22 currently holds S3*S7 (step 1 of T2). */
      REAL T2_0 = *(C22) + T1_0;
      REAL T2_1 = *(C22+1) + T1_1;
      REAL T2_2 = *(C22+2) + T1_2;
      REAL T2_3 = *(C22+3) + T1_3;
      (*(C11))   += LocalM2_0;
      (*(C11+1)) += LocalM2_1;
      (*(C11+2)) += LocalM2_2;
      (*(C11+3)) += LocalM2_3;
      (*(C12))   += LocalM5_0 + T1_0;
      (*(C12+1)) += LocalM5_1 + T1_1;
      (*(C12+2)) += LocalM5_2 + T1_2;
      (*(C12+3)) += LocalM5_3 + T1_3;
      (*(C22))   = LocalM5_0 + T2_0;
      (*(C22+1)) = LocalM5_1 + T2_1;
      (*(C22+2)) = LocalM5_2 + T2_2;
      (*(C22+3)) = LocalM5_3 + T2_3;
      /* C21 currently holds A22*S8; final value is T2 - that product. */
      (*(C21 ))  = (- *(C21 ))  + T2_0;
      (*(C21+1)) = (- *(C21+1)) + T2_1;
      (*(C21+2)) = (- *(C21+2)) + T2_2;
      (*(C21+3)) = (- *(C21+3)) + T2_3;
      M5 += 4;
      M2 += 4;
      T1sMULT += 4;
      C11 += 4;
      C12 += 4;
      C21 += 4;
      C22 += 4;
    }
    /* Skip the stride padding between C quadrant rows. */
    C11 = (REAL*) ( ((PTR) C11 ) + RowIncrementC);
    C12 = (REAL*) ( ((PTR) C12 ) + RowIncrementC);
    C21 = (REAL*) ( ((PTR) C21 ) + RowIncrementC);
    C22 = (REAL*) ( ((PTR) C22 ) + RowIncrementC);
  }
  free(StartHeap);
}
#if defined(IF_CUTOFF)
void OptimizedStrassenMultiply_par(REAL *C, REAL *A, REAL *B, unsigned MatrixSize,
     unsigned RowWidthC, unsigned RowWidthA, unsigned RowWidthB, int Depth)
{
  /* Parallel Strassen multiply (IF_CUTOFF variant): C = A x B. Same
   * algorithm as OptimizedStrassenMultiply_seq, except the seven recursive
   * products are spawned as OpenMP tasks whose creation is throttled by
   * `if (Depth < bots_cutoff_value)`; a taskwait precedes the combine pass.
   * The seven tasks write to disjoint scratch/C quadrants, so they need no
   * further synchronization among themselves. */
  unsigned QuadrantSize = MatrixSize >> 1; /* MatrixSize / 2 */
  /* Each scratch quadrant is padded by 32 bytes so it can be placed on a
   * 32-byte cache-line boundary. */
  unsigned QuadrantSizeInBytes = sizeof(REAL) * QuadrantSize * QuadrantSize
                                 + 32;
  unsigned Column, Row;
  
  /************************************************************************
  ** For each matrix A, B, and C, we'll want pointers to each quadrant
  ** in the matrix. These quadrants will be addressed as follows:
  **  --        --
  **  | A11  A12 |
  **  |          |
  **  | A21  A22 |
  **  --        --
  ************************************************************************/
  REAL /* *A11, *B11, *C11, */ *A12, *B12, *C12,
       *A21, *B21, *C21, *A22, *B22, *C22;
  /* Scratch matrices: S1-S8 hold quadrant sums/differences, M2 and M5 hold
   * two of the Strassen products, T1sMULT holds the S2*S6 product. */
  REAL *S1,*S2,*S3,*S4,*S5,*S6,*S7,*S8,*M2,*M5,*T1sMULT;
  #define T2sMULT C22
  #define NumberOfVariables 11
  /* Byte offsets used to walk the scratch matrices and A/B quadrants. */
  PTR TempMatrixOffset = 0;
  PTR MatrixOffsetA = 0;
  PTR MatrixOffsetB = 0;
  char *Heap;
  void *StartHeap;
  /* Distance between the end of a matrix row and the start of the next row */
  PTR RowIncrementA = ( RowWidthA - QuadrantSize ) << 3;
  PTR RowIncrementB = ( RowWidthB - QuadrantSize ) << 3;
  PTR RowIncrementC = ( RowWidthC - QuadrantSize ) << 3;
  /* Recursion base case: below the application cutoff fall back to the
   * (sequential) blocked naive multiply. */
  if (MatrixSize <= bots_app_cutoff_value) {
    MultiplyByDivideAndConquer(C, A, B, MatrixSize, RowWidthC, RowWidthA, RowWidthB, 0);
    return;
  }
  /* Initialize quadrant matrices */
  #define A11 A
  #define B11 B
  #define C11 C
  A12 = A11 + QuadrantSize;
  B12 = B11 + QuadrantSize;
  C12 = C11 + QuadrantSize;
  A21 = A + (RowWidthA * QuadrantSize);
  B21 = B + (RowWidthB * QuadrantSize);
  C21 = C + (RowWidthC * QuadrantSize);
  A22 = A21 + QuadrantSize;
  B22 = B21 + QuadrantSize;
  C22 = C21 + QuadrantSize;
  /* Allocate Heap Space Here */
  /* NOTE(review): malloc result is not checked for NULL before use. */
  StartHeap = Heap = malloc(QuadrantSizeInBytes * NumberOfVariables);
  /* ensure that heap is on cache boundary (32-byte aligned) */
  if ( ((PTR) Heap) & 31)
     Heap = (char*) ( ((PTR) Heap) + 32 - ( ((PTR) Heap) & 31) );
  /* Distribute the heap space over the variables */
  S1 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  S2 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  S3 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  S4 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  S5 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  S6 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  S7 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  S8 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  M2 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  M5 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  T1sMULT = (REAL*) Heap; Heap += QuadrantSizeInBytes;
  /***************************************************************************
  ** Phase 1 (sequential): build the S matrices element by element.
  ** Step through all columns row by row (vertically)
  ** (jumps in memory by RowWidth => bad locality)
  ** (but we want the best locality on the innermost loop)
  ***************************************************************************/
  for (Row = 0; Row < QuadrantSize; Row++) {
    /*************************************************************************
    ** Step through each row horizontally (addressing elements in each column)
    ** (jumps linearly through memory => good locality)
    *************************************************************************/
    for (Column = 0; Column < QuadrantSize; Column++) {
      /***********************************************************
      ** Within this loop, the following holds for MatrixOffset:
      ** MatrixOffset = (Row * RowWidth) + Column
      ** (note: that the unit of the offset is number of reals)
      ***********************************************************/
      /* Element of Global Matrix, such as A, B, C */
      #define E(Matrix)   (* (REAL*) ( ((PTR) Matrix) + TempMatrixOffset ) )
      #define EA(Matrix)  (* (REAL*) ( ((PTR) Matrix) + MatrixOffsetA ) )
      #define EB(Matrix)  (* (REAL*) ( ((PTR) Matrix) + MatrixOffsetB ) )
      /* FIXME - may pay to expand these out - got higher speed-ups below */
      /* S4 = A12 - ( S2 = ( S1 = A21 + A22 ) - A11 ) */
      E(S4) = EA(A12) - ( E(S2) = ( E(S1) = EA(A21) + EA(A22) ) - EA(A11) );
      /* S8 = (S6 = B22 - ( S5 = B12 - B11 ) ) - B21 */
      E(S8) = ( E(S6) = EB(B22) - ( E(S5) = EB(B12) - EB(B11) ) ) - EB(B21);
      /* S3 = A11 - A21 */
      E(S3) = EA(A11) - EA(A21);
      /* S7 = B22 - B12 */
      E(S7) = EB(B22) - EB(B12);
      TempMatrixOffset += sizeof(REAL);
      MatrixOffsetA += sizeof(REAL);
      MatrixOffsetB += sizeof(REAL);
    } /* end column loop */
    /* Skip the stride padding of A and B between quadrant rows. */
    MatrixOffsetA += RowIncrementA;
    MatrixOffsetB += RowIncrementB;
  } /* end row loop */
  /* Phase 2: the seven recursive half-size products, each spawned as a task
   * while Depth is below the task-creation cutoff (deeper calls run inline). */
  /* M2 = A11 x B11 */
  #pragma omp task if (Depth < bots_cutoff_value)
  OptimizedStrassenMultiply_par(M2, A11, B11, QuadrantSize, QuadrantSize, RowWidthA, RowWidthB, Depth+1);
  /* M5 = S1 * S5 */
  #pragma omp task if (Depth < bots_cutoff_value)
  OptimizedStrassenMultiply_par(M5, S1, S5, QuadrantSize, QuadrantSize, QuadrantSize, QuadrantSize, Depth+1);
  /* Step 1 of T1 = S2 x S6 + M2 */
  #pragma omp task if (Depth < bots_cutoff_value)
  OptimizedStrassenMultiply_par(T1sMULT, S2, S6, QuadrantSize, QuadrantSize, QuadrantSize, QuadrantSize, Depth+1);
  /* Step 1 of T2 = T1 + S3 x S7 */
  #pragma omp task if (Depth < bots_cutoff_value)
  OptimizedStrassenMultiply_par(C22, S3, S7, QuadrantSize, RowWidthC /*FIXME*/, QuadrantSize, QuadrantSize, Depth+1);
  /* Step 1 of C11 = M2 + A12 * B21 */
  #pragma omp task if (Depth < bots_cutoff_value)
  OptimizedStrassenMultiply_par(C11, A12, B21, QuadrantSize, RowWidthC, RowWidthA, RowWidthB, Depth+1);
  /* Step 1 of C12 = S4 x B22 + T1 + M5 */
  #pragma omp task if (Depth < bots_cutoff_value)
  OptimizedStrassenMultiply_par(C12, S4, B22, QuadrantSize, RowWidthC, QuadrantSize, RowWidthB, Depth+1);
  /* Step 1 of C21 = T2 - A22 * S8 */
  #pragma omp task if (Depth < bots_cutoff_value)
  OptimizedStrassenMultiply_par(C21, A22, S8, QuadrantSize, RowWidthC, RowWidthA, QuadrantSize, Depth+1);
  /**********************************************
  ** Synchronization Point: all seven products
  ** must be complete before the combine pass.
  **********************************************/
  #pragma omp taskwait
  /***************************************************************************
  ** Phase 3: combine. With T1 = T1sMULT + M2 and T2 = (old C22) + T1:
  **   C11 += M2;  C12 += M5 + T1;  C22 = M5 + T2;  C21 = T2 - (old C21).
  ** Step through all columns row by row (vertically)
  ** (jumps in memory by RowWidth => bad locality)
  ** (but we want the best locality on the innermost loop)
  ***************************************************************************/
  for (Row = 0; Row < QuadrantSize; Row++) {
    /*************************************************************************
    ** Step through each row horizontally (addressing elements in each column)
    ** (jumps linearly through memory => good locality)
    *************************************************************************/
    for (Column = 0; Column < QuadrantSize; Column += 4) {
      REAL LocalM5_0 = *(M5);
      REAL LocalM5_1 = *(M5+1);
      REAL LocalM5_2 = *(M5+2);
      REAL LocalM5_3 = *(M5+3);
      REAL LocalM2_0 = *(M2);
      REAL LocalM2_1 = *(M2+1);
      REAL LocalM2_2 = *(M2+2);
      REAL LocalM2_3 = *(M2+3);
      REAL T1_0 = *(T1sMULT) + LocalM2_0;
      REAL T1_1 = *(T1sMULT+1) + LocalM2_1;
      REAL T1_2 = *(T1sMULT+2) + LocalM2_2;
      REAL T1_3 = *(T1sMULT+3) + LocalM2_3;
      /* C22 currently holds S3*S7 (step 1 of T2). */
      REAL T2_0 = *(C22) + T1_0;
      REAL T2_1 = *(C22+1) + T1_1;
      REAL T2_2 = *(C22+2) + T1_2;
      REAL T2_3 = *(C22+3) + T1_3;
      (*(C11))   += LocalM2_0;
      (*(C11+1)) += LocalM2_1;
      (*(C11+2)) += LocalM2_2;
      (*(C11+3)) += LocalM2_3;
      (*(C12))   += LocalM5_0 + T1_0;
      (*(C12+1)) += LocalM5_1 + T1_1;
      (*(C12+2)) += LocalM5_2 + T1_2;
      (*(C12+3)) += LocalM5_3 + T1_3;
      (*(C22))   = LocalM5_0 + T2_0;
      (*(C22+1)) = LocalM5_1 + T2_1;
      (*(C22+2)) = LocalM5_2 + T2_2;
      (*(C22+3)) = LocalM5_3 + T2_3;
      /* C21 currently holds A22*S8; final value is T2 - that product. */
      (*(C21 ))  = (- *(C21 ))  + T2_0;
      (*(C21+1)) = (- *(C21+1)) + T2_1;
      (*(C21+2)) = (- *(C21+2)) + T2_2;
      (*(C21+3)) = (- *(C21+3)) + T2_3;
      M5 += 4;
      M2 += 4;
      T1sMULT += 4;
      C11 += 4;
      C12 += 4;
      C21 += 4;
      C22 += 4;
    }
    /* Skip the stride padding between C quadrant rows. */
    C11 = (REAL*) ( ((PTR) C11 ) + RowIncrementC);
    C12 = (REAL*) ( ((PTR) C12 ) + RowIncrementC);
    C21 = (REAL*) ( ((PTR) C21 ) + RowIncrementC);
    C22 = (REAL*) ( ((PTR) C22 ) + RowIncrementC);
  }
  free(StartHeap);
}
#elif defined(MANUAL_CUTOFF)
void OptimizedStrassenMultiply_par(REAL *C, REAL *A, REAL *B, unsigned MatrixSize,
unsigned RowWidthC, unsigned RowWidthA, unsigned RowWidthB, int Depth)
{
unsigned QuadrantSize = MatrixSize >> 1; /* MatixSize / 2 */
unsigned QuadrantSizeInBytes = sizeof(REAL) * QuadrantSize * QuadrantSize
+ 32;
unsigned Column, Row;
/************************************************************************
** For each matrix A, B, and C, we'll want pointers to each quandrant
** in the matrix. These quandrants will be addressed as follows:
** -- --
** | A11 A12 |
** | |
** | A21 A22 |
** -- --
************************************************************************/
REAL /* *A11, *B11, *C11, */ *A12, *B12, *C12,
*A21, *B21, *C21, *A22, *B22, *C22;
REAL *S1,*S2,*S3,*S4,*S5,*S6,*S7,*S8,*M2,*M5,*T1sMULT;
#define T2sMULT C22
#define NumberOfVariables 11
PTR TempMatrixOffset = 0;
PTR MatrixOffsetA = 0;
PTR MatrixOffsetB = 0;
char *Heap;
void *StartHeap;
/* Distance between the end of a matrix row and the start of the next row */
PTR RowIncrementA = ( RowWidthA - QuadrantSize ) << 3;
PTR RowIncrementB = ( RowWidthB - QuadrantSize ) << 3;
PTR RowIncrementC = ( RowWidthC - QuadrantSize ) << 3;
if (MatrixSize <= bots_app_cutoff_value) {
MultiplyByDivideAndConquer(C, A, B, MatrixSize, RowWidthC, RowWidthA, RowWidthB, 0);
return;
}
/* Initialize quandrant matrices */
#define A11 A
#define B11 B
#define C11 C
A12 = A11 + QuadrantSize;
B12 = B11 + QuadrantSize;
C12 = C11 + QuadrantSize;
A21 = A + (RowWidthA * QuadrantSize);
B21 = B + (RowWidthB * QuadrantSize);
C21 = C + (RowWidthC * QuadrantSize);
A22 = A21 + QuadrantSize;
B22 = B21 + QuadrantSize;
C22 = C21 + QuadrantSize;
/* Allocate Heap Space Here */
StartHeap = Heap = (char*)malloc(QuadrantSizeInBytes * NumberOfVariables);
/* ensure that heap is on cache boundary */
if ( ((PTR) Heap) & 31)
Heap = (char*) ( ((PTR) Heap) + 32 - ( ((PTR) Heap) & 31) );
/* Distribute the heap space over the variables */
S1 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
S2 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
S3 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
S4 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
S5 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
S6 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
S7 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
S8 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
M2 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
M5 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
T1sMULT = (REAL*) Heap; Heap += QuadrantSizeInBytes;
/***************************************************************************
** Step through all columns row by row (vertically)
** (jumps in memory by RowWidth => bad locality)
** (but we want the best locality on the innermost loop)
***************************************************************************/
for (Row = 0; Row < QuadrantSize; Row++) {
/*************************************************************************
** Step through each row horizontally (addressing elements in each column)
** (jumps linearly througn memory => good locality)
*************************************************************************/
for (Column = 0; Column < QuadrantSize; Column++) {
/***********************************************************
** Within this loop, the following holds for MatrixOffset:
** MatrixOffset = (Row * RowWidth) + Column
** (note: that the unit of the offset is number of reals)
***********************************************************/
/* Element of Global Matrix, such as A, B, C */
#define E(Matrix) (* (REAL*) ( ((PTR) Matrix) + TempMatrixOffset ) )
#define EA(Matrix) (* (REAL*) ( ((PTR) Matrix) + MatrixOffsetA ) )
#define EB(Matrix) (* (REAL*) ( ((PTR) Matrix) + MatrixOffsetB ) )
/* FIXME - may pay to expand these out - got higher speed-ups below */
/* S4 = A12 - ( S2 = ( S1 = A21 + A22 ) - A11 ) */
E(S4) = EA(A12) - ( E(S2) = ( E(S1) = EA(A21) + EA(A22) ) - EA(A11) );
/* S8 = (S6 = B22 - ( S5 = B12 - B11 ) ) - B21 */
E(S8) = ( E(S6) = EB(B22) - ( E(S5) = EB(B12) - EB(B11) ) ) - EB(B21);
/* S3 = A11 - A21 */
E(S3) = EA(A11) - EA(A21);
/* S7 = B22 - B12 */
E(S7) = EB(B22) - EB(B12);
TempMatrixOffset += sizeof(REAL);
MatrixOffsetA += sizeof(REAL);
MatrixOffsetB += sizeof(REAL);
} /* end row loop*/
MatrixOffsetA += RowIncrementA;
MatrixOffsetB += RowIncrementB;
} /* end column loop */
if (Depth < bots_cutoff_value)
{
/* M2 = A11 x B11 */
#pragma omp task
OptimizedStrassenMultiply_par(M2, A11, B11, QuadrantSize, QuadrantSize, RowWidthA, RowWidthB, Depth+1);
/* M5 = S1 * S5 */
#pragma omp task
OptimizedStrassenMultiply_par(M5, S1, S5, QuadrantSize, QuadrantSize, QuadrantSize, QuadrantSize, Depth+1);
/* Step 1 of T1 = S2 x S6 + M2 */
#pragma omp task
OptimizedStrassenMultiply_par(T1sMULT, S2, S6, QuadrantSize, QuadrantSize, QuadrantSize, QuadrantSize, Depth+1);
/* Step 1 of T2 = T1 + S3 x S7 */
#pragma omp task
OptimizedStrassenMultiply_par(C22, S3, S7, QuadrantSize, RowWidthC /*FIXME*/, QuadrantSize, QuadrantSize, Depth+1);
/* Step 1 of C11 = M2 + A12 * B21 */
#pragma omp task
OptimizedStrassenMultiply_par(C11, A12, B21, QuadrantSize, RowWidthC, RowWidthA, RowWidthB, Depth+1);
/* Step 1 of C12 = S4 x B22 + T1 + M5 */
#pragma omp task
OptimizedStrassenMultiply_par(C12, S4, B22, QuadrantSize, RowWidthC, QuadrantSize, RowWidthB, Depth+1);
/* Step 1 of C21 = T2 - A22 * S8 */
#pragma omp task
OptimizedStrassenMultiply_par(C21, A22, S8, QuadrantSize, RowWidthC, RowWidthA, QuadrantSize, Depth+1);
/**********************************************
** Synchronization Point
**********************************************/
#pragma omp taskwait
}
else
{
/* M2 = A11 x B11 */
OptimizedStrassenMultiply_par(M2, A11, B11, QuadrantSize, QuadrantSize, RowWidthA, RowWidthB, Depth+1);
/* M5 = S1 * S5 */
OptimizedStrassenMultiply_par(M5, S1, S5, QuadrantSize, QuadrantSize, QuadrantSize, QuadrantSize, Depth+1);
/* Step 1 of T1 = S2 x S6 + M2 */
OptimizedStrassenMultiply_par(T1sMULT, S2, S6, QuadrantSize, QuadrantSize, QuadrantSize, QuadrantSize, Depth+1);
/* Step 1 of T2 = T1 + S3 x S7 */
OptimizedStrassenMultiply_par(C22, S3, S7, QuadrantSize, RowWidthC /*FIXME*/, QuadrantSize, QuadrantSize, Depth+1);
/* Step 1 of C11 = M2 + A12 * B21 */
OptimizedStrassenMultiply_par(C11, A12, B21, QuadrantSize, RowWidthC, RowWidthA, RowWidthB, Depth+1);
/* Step 1 of C12 = S4 x B22 + T1 + M5 */
OptimizedStrassenMultiply_par(C12, S4, B22, QuadrantSize, RowWidthC, QuadrantSize, RowWidthB, Depth+1);
/* Step 1 of C21 = T2 - A22 * S8 */
OptimizedStrassenMultiply_par(C21, A22, S8, QuadrantSize, RowWidthC, RowWidthA, QuadrantSize, Depth+1);
}
/***************************************************************************
** Step through all columns row by row (vertically)
** (jumps in memory by RowWidth => bad locality)
** (but we want the best locality on the innermost loop)
***************************************************************************/
for (Row = 0; Row < QuadrantSize; Row++) {
/*************************************************************************
** Step through each row horizontally (addressing elements in each column)
** (jumps linearly througn memory => good locality)
*************************************************************************/
for (Column = 0; Column < QuadrantSize; Column += 4) {
REAL LocalM5_0 = *(M5);
REAL LocalM5_1 = *(M5+1);
REAL LocalM5_2 = *(M5+2);
REAL LocalM5_3 = *(M5+3);
REAL LocalM2_0 = *(M2);
REAL LocalM2_1 = *(M2+1);
REAL LocalM2_2 = *(M2+2);
REAL LocalM2_3 = *(M2+3);
REAL T1_0 = *(T1sMULT) + LocalM2_0;
REAL T1_1 = *(T1sMULT+1) + LocalM2_1;
REAL T1_2 = *(T1sMULT+2) + LocalM2_2;
REAL T1_3 = *(T1sMULT+3) + LocalM2_3;
REAL T2_0 = *(C22) + T1_0;
REAL T2_1 = *(C22+1) + T1_1;
REAL T2_2 = *(C22+2) + T1_2;
REAL T2_3 = *(C22+3) + T1_3;
(*(C11)) += LocalM2_0;
(*(C11+1)) += LocalM2_1;
(*(C11+2)) += LocalM2_2;
(*(C11+3)) += LocalM2_3;
(*(C12)) += LocalM5_0 + T1_0;
(*(C12+1)) += LocalM5_1 + T1_1;
(*(C12+2)) += LocalM5_2 + T1_2;
(*(C12+3)) += LocalM5_3 + T1_3;
(*(C22)) = LocalM5_0 + T2_0;
(*(C22+1)) = LocalM5_1 + T2_1;
(*(C22+2)) = LocalM5_2 + T2_2;
(*(C22+3)) = LocalM5_3 + T2_3;
(*(C21 )) = (- *(C21 )) + T2_0;
(*(C21+1)) = (- *(C21+1)) + T2_1;
(*(C21+2)) = (- *(C21+2)) + T2_2;
(*(C21+3)) = (- *(C21+3)) + T2_3;
M5 += 4;
M2 += 4;
T1sMULT += 4;
C11 += 4;
C12 += 4;
C21 += 4;
C22 += 4;
}
C11 = (REAL*) ( ((PTR) C11 ) + RowIncrementC);
C12 = (REAL*) ( ((PTR) C12 ) + RowIncrementC);
C21 = (REAL*) ( ((PTR) C21 ) + RowIncrementC);
C22 = (REAL*) ( ((PTR) C22 ) + RowIncrementC);
}
free(StartHeap);
}
#else
/*
 * Recursive, task-parallel Strassen multiplication (Winograd's variant with
 * 7 recursive products): C = A x B for square MatrixSize x MatrixSize
 * matrices.  RowWidth{A,B,C} are the row strides, in elements, of each
 * matrix inside its parent allocation; Depth counts recursion levels.
 * This build variant spawns the seven quadrant-product tasks at every
 * recursion level (no Depth-based cutoff); small problems fall back to
 * MultiplyByDivideAndConquer.
 */
void OptimizedStrassenMultiply_par(REAL *C, REAL *A, REAL *B, unsigned MatrixSize,
unsigned RowWidthC, unsigned RowWidthA, unsigned RowWidthB, int Depth)
{
unsigned QuadrantSize = MatrixSize >> 1; /* MatixSize / 2 */
/* Size of one QuadrantSize x QuadrantSize scratch matrix.  The +32 per
** variable leaves enough slack that the whole scratch arena can be shifted
** up to a 32-byte cache-line boundary below without overrunning. */
unsigned QuadrantSizeInBytes = sizeof(REAL) * QuadrantSize * QuadrantSize
+ 32;
unsigned Column, Row;
/************************************************************************
** For each matrix A, B, and C, we'll want pointers to each quandrant
** in the matrix. These quandrants will be addressed as follows:
**  --        --
**  | A11  A12 |
**  |          |
**  | A21  A22 |
**  --        --
************************************************************************/
REAL /* *A11, *B11, *C11, */ *A12, *B12, *C12,
*A21, *B21, *C21, *A22, *B22, *C22;
/* Winograd temporaries: eight sums/differences S1..S8, two of the seven
** products kept whole (M2, M5), and T1sMULT holding the S2*S6 product. */
REAL *S1,*S2,*S3,*S4,*S5,*S6,*S7,*S8,*M2,*M5,*T1sMULT;
#define T2sMULT C22
#define NumberOfVariables 11
PTR TempMatrixOffset = 0;
PTR MatrixOffsetA = 0;
PTR MatrixOffsetB = 0;
char *Heap;
void *StartHeap;
/* Distance between the end of a matrix row and the start of the next row */
/* NOTE(review): << 3 multiplies by 8 bytes, i.e. it assumes
** sizeof(REAL) == sizeof(double) -- confirm if REAL is ever retyped. */
PTR RowIncrementA = ( RowWidthA - QuadrantSize ) << 3;
PTR RowIncrementB = ( RowWidthB - QuadrantSize ) << 3;
PTR RowIncrementC = ( RowWidthC - QuadrantSize ) << 3;
/* Below the application cutoff Strassen is not worthwhile; use the plain
** recursive multiply instead. */
if (MatrixSize <= bots_app_cutoff_value) {
MultiplyByDivideAndConquer(C, A, B, MatrixSize, RowWidthC, RowWidthA, RowWidthB, 0);
return;
}
/* Initialize quandrant matrices */
#define A11 A
#define B11 B
#define C11 C
A12 = A11 + QuadrantSize;
B12 = B11 + QuadrantSize;
C12 = C11 + QuadrantSize;
A21 = A + (RowWidthA * QuadrantSize);
B21 = B + (RowWidthB * QuadrantSize);
C21 = C + (RowWidthC * QuadrantSize);
A22 = A21 + QuadrantSize;
B22 = B21 + QuadrantSize;
C22 = C21 + QuadrantSize;
/* Allocate Heap Space Here */
StartHeap = malloc(QuadrantSizeInBytes * NumberOfVariables);
Heap = (char*)StartHeap;
/* ensure that heap is on cache boundary */
if ( ((PTR) Heap) & 31)
Heap = (char*) ( ((PTR) Heap) + 32 - ( ((PTR) Heap) & 31) );
/* Distribute the heap space over the variables */
S1 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
S2 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
S3 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
S4 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
S5 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
S6 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
S7 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
S8 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
M2 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
M5 = (REAL*) Heap; Heap += QuadrantSizeInBytes;
T1sMULT = (REAL*) Heap; Heap += QuadrantSizeInBytes;
/***************************************************************************
** Step through all columns row by row (vertically)
** (jumps in memory by RowWidth => bad locality)
** (but we want the best locality on the innermost loop)
***************************************************************************/
for (Row = 0; Row < QuadrantSize; Row++) {
/*************************************************************************
** Step through each row horizontally (addressing elements in each column)
** (jumps linearly througn memory => good locality)
*************************************************************************/
for (Column = 0; Column < QuadrantSize; Column++) {
/***********************************************************
** Within this loop, the following holds for MatrixOffset:
** MatrixOffset = (Row * RowWidth) + Column
** (note: that the unit of the offset is number of reals)
***********************************************************/
/* Element of Global Matrix, such as A, B, C */
#define E(Matrix) (* (REAL*) ( ((PTR) Matrix) + TempMatrixOffset ) )
#define EA(Matrix) (* (REAL*) ( ((PTR) Matrix) + MatrixOffsetA ) )
#define EB(Matrix) (* (REAL*) ( ((PTR) Matrix) + MatrixOffsetB ) )
/* FIXME - may pay to expand these out - got higher speed-ups below */
/* S4 = A12 - ( S2 = ( S1 = A21 + A22 ) - A11 ) */
E(S4) = EA(A12) - ( E(S2) = ( E(S1) = EA(A21) + EA(A22) ) - EA(A11) );
/* S8 = (S6 = B22 - ( S5 = B12 - B11 ) ) - B21 */
E(S8) = ( E(S6) = EB(B22) - ( E(S5) = EB(B12) - EB(B11) ) ) - EB(B21);
/* S3 = A11 - A21 */
E(S3) = EA(A11) - EA(A21);
/* S7 = B22 - B12 */
E(S7) = EB(B22) - EB(B12);
TempMatrixOffset += sizeof(REAL);
MatrixOffsetA += sizeof(REAL);
MatrixOffsetB += sizeof(REAL);
} /* end row loop*/
MatrixOffsetA += RowIncrementA;
MatrixOffsetB += RowIncrementB;
} /* end column loop */
/* Seven recursive quadrant products, each as an independent OpenMP task;
** all are joined at the taskwait below before the combine phase reads
** their results. */
/* M2 = A11 x B11 */
#pragma omp task
OptimizedStrassenMultiply_par(M2, A11, B11, QuadrantSize, QuadrantSize, RowWidthA, RowWidthB, Depth+1);
/* M5 = S1 * S5 */
#pragma omp task
OptimizedStrassenMultiply_par(M5, S1, S5, QuadrantSize, QuadrantSize, QuadrantSize, QuadrantSize, Depth+1);
/* Step 1 of T1 = S2 x S6 + M2 */
#pragma omp task
OptimizedStrassenMultiply_par(T1sMULT, S2, S6, QuadrantSize, QuadrantSize, QuadrantSize, QuadrantSize, Depth+1);
/* Step 1 of T2 = T1 + S3 x S7 */
#pragma omp task
OptimizedStrassenMultiply_par(C22, S3, S7, QuadrantSize, RowWidthC /*FIXME*/, QuadrantSize, QuadrantSize, Depth+1);
/* Step 1 of C11 = M2 + A12 * B21 */
#pragma omp task
OptimizedStrassenMultiply_par(C11, A12, B21, QuadrantSize, RowWidthC, RowWidthA, RowWidthB, Depth+1);
/* Step 1 of C12 = S4 x B22 + T1 + M5 */
#pragma omp task
OptimizedStrassenMultiply_par(C12, S4, B22, QuadrantSize, RowWidthC, QuadrantSize, RowWidthB, Depth+1);
/* Step 1 of C21 = T2 - A22 * S8 */
#pragma omp task
OptimizedStrassenMultiply_par(C21, A22, S8, QuadrantSize, RowWidthC, RowWidthA, QuadrantSize, Depth+1);
/**********************************************
** Synchronization Point
**********************************************/
#pragma omp taskwait
/* Combine phase: fold M2, M5, T1 and the partial C quadrants into the final
** C11, C12, C21, C22.  The inner loop is unrolled by 4, so QuadrantSize is
** assumed to be a multiple of 4 (i.e. MatrixSize a multiple of 8). */
/***************************************************************************
** Step through all columns row by row (vertically)
** (jumps in memory by RowWidth => bad locality)
** (but we want the best locality on the innermost loop)
***************************************************************************/
for (Row = 0; Row < QuadrantSize; Row++) {
/*************************************************************************
** Step through each row horizontally (addressing elements in each column)
** (jumps linearly througn memory => good locality)
*************************************************************************/
for (Column = 0; Column < QuadrantSize; Column += 4) {
REAL LocalM5_0 = *(M5);
REAL LocalM5_1 = *(M5+1);
REAL LocalM5_2 = *(M5+2);
REAL LocalM5_3 = *(M5+3);
REAL LocalM2_0 = *(M2);
REAL LocalM2_1 = *(M2+1);
REAL LocalM2_2 = *(M2+2);
REAL LocalM2_3 = *(M2+3);
REAL T1_0 = *(T1sMULT) + LocalM2_0;
REAL T1_1 = *(T1sMULT+1) + LocalM2_1;
REAL T1_2 = *(T1sMULT+2) + LocalM2_2;
REAL T1_3 = *(T1sMULT+3) + LocalM2_3;
REAL T2_0 = *(C22) + T1_0;
REAL T2_1 = *(C22+1) + T1_1;
REAL T2_2 = *(C22+2) + T1_2;
REAL T2_3 = *(C22+3) + T1_3;
(*(C11)) += LocalM2_0;
(*(C11+1)) += LocalM2_1;
(*(C11+2)) += LocalM2_2;
(*(C11+3)) += LocalM2_3;
(*(C12)) += LocalM5_0 + T1_0;
(*(C12+1)) += LocalM5_1 + T1_1;
(*(C12+2)) += LocalM5_2 + T1_2;
(*(C12+3)) += LocalM5_3 + T1_3;
(*(C22)) = LocalM5_0 + T2_0;
(*(C22+1)) = LocalM5_1 + T2_1;
(*(C22+2)) = LocalM5_2 + T2_2;
(*(C22+3)) = LocalM5_3 + T2_3;
(*(C21 )) = (- *(C21 )) + T2_0;
(*(C21+1)) = (- *(C21+1)) + T2_1;
(*(C21+2)) = (- *(C21+2)) + T2_2;
(*(C21+3)) = (- *(C21+3)) + T2_3;
M5 += 4;
M2 += 4;
T1sMULT += 4;
C11 += 4;
C12 += 4;
C21 += 4;
C22 += 4;
}
/* Advance the C quadrant pointers past the part of the row that belongs
** to the neighbouring quadrants (RowIncrementC is in bytes). */
C11 = (REAL*) ( ((PTR) C11 ) + RowIncrementC);
C12 = (REAL*) ( ((PTR) C12 ) + RowIncrementC);
C21 = (REAL*) ( ((PTR) C21 ) + RowIncrementC);
C22 = (REAL*) ( ((PTR) C22 ) + RowIncrementC);
}
free(StartHeap);
}
#endif
/*
* Set an n by n matrix A to random values. The distance between
* rows is an
*/
/*
 * Fill the n-by-n matrix A with uniform pseudo-random values in [0, 1].
 * 'an' is the row stride (distance between consecutive rows) used by ELEM.
 */
void init_matrix(int n, REAL *A, int an)
{
int row, col;
for (row = 0; row < n; ++row) {
for (col = 0; col < n; ++col) {
ELEM(A, an, row, col) = ((double) rand()) / (double) RAND_MAX;
}
}
}
/*
* Compare two matrices. Print an error message if they differ by
* more than EPSILON.
*/
/*
 * Element-wise comparison of two n-by-n matrices A (row stride an) and
 * B (row stride bn).  Returns BOTS_RESULT_SUCCESSFUL when every element's
 * relative error (|A - B| / A) is within EPSILON, BOTS_RESULT_UNSUCCESSFUL
 * on the first element that exceeds it.
 */
int compare_matrix(int n, REAL *A, int an, REAL *B, int bn)
{
int i, j;
REAL c;
for (i = 0; i < n; ++i)
for (j = 0; j < n; ++j) {
/* compute the relative error c */
c = ELEM(A, an, i, j) - ELEM(B, bn, i, j);
if (c < 0.0)
c = -c;
/* NOTE(review): no guard against ELEM(A,..) == 0; a zero reference entry
** yields inf/NaN here.  Acceptable for this benchmark, where init_matrix
** produces values in (0, 1] -- confirm if reused with other inputs. */
c = c / ELEM(A, an, i, j);
if (c > EPSILON) {
bots_message("Strassen: Wrong answer!\n");
return BOTS_RESULT_UNSUCCESSFUL;
}
}
return BOTS_RESULT_SUCCESSFUL;
}
/*
* Allocate a matrix of side n (therefore n^2 elements)
*/
REAL *alloc_matrix(int n)
{
return (double*)malloc(n * n * sizeof(REAL));
}
/*
 * Entry point for the parallel Strassen benchmark: computes C = A x B for
 * n-by-n matrices with row stride n.
 *
 * Fixes: the braces of the 'single' region and the taskwait were commented
 * out, leaving the parallel/single constructs binding implicitly to the lone
 * task directive.  The explicit structured block and taskwait are restored;
 * behavior is unchanged (the task was already guaranteed complete at the
 * single region's implicit barrier), but the region structure is now
 * unambiguous and the dead commented-out code is gone.
 */
void strassen_main_par(REAL *A, REAL *B, REAL *C, int n)
{
bots_message("Computing parallel Strassen algorithm (n=%d) ", n);
#pragma omp parallel
#pragma omp single
{
#pragma omp task
OptimizedStrassenMultiply_par(C, A, B, n, n, n, n, 1);
#pragma omp taskwait
}
bots_message(" completed!\n");
}
/*
 * Entry point for the sequential Strassen benchmark: computes C = A x B for
 * n-by-n matrices with row stride n, using the task-free implementation.
 */
void strassen_main_seq(REAL *A, REAL *B, REAL *C, int n)
{
bots_message("Computing sequential Strassen algorithm (n=%d) ", n);
OptimizedStrassenMultiply_seq(C, A, B, n, n, n, n, 1);
bots_message(" completed!\n");
}
|
close_enter_exit.c | // RUN: %libomptarget-compile-run-and-check-generic
// REQUIRES: unified_shared_memory
// UNSUPPORTED: clang-6, clang-7, clang-8, clang-9
// Fails on amdgpu with error: GPU Memory Error
// XFAIL: amdgcn-amd-amdhsa
#include <omp.h>
#include <stdio.h>
#pragma omp requires unified_shared_memory
#define N 1024
/*
 * FileCheck-driven test of the 'close' map-type modifier under
 * #pragma omp requires unified_shared_memory: without 'close' the device
 * uses the host allocation directly; with 'close' the runtime makes a
 * discrete device copy.  Each scenario writes the device-visible address of
 * 'a' into device_alloc and compares it against the host address.
 * The "// CHECK:" comments are consumed by FileCheck and must match the
 * program's output exactly.
 */
int main(int argc, char *argv[]) {
int fails; // NOTE(review): unused
void *host_alloc = 0, *device_alloc = 0;
int *a = (int *)malloc(N * sizeof(int));
int dev = omp_get_default_device();
// Init
for (int i = 0; i < N; ++i) {
a[i] = 10;
}
host_alloc = &a[0];
//
// map + target no close: with unified shared memory, no copy is made, so
// the device sees the host address.
//
#pragma omp target data map(tofrom : a[ : N]) map(tofrom : device_alloc)
{
#pragma omp target map(tofrom : device_alloc)
{ device_alloc = &a[0]; }
}
// CHECK: a used from unified memory.
if (device_alloc == host_alloc)
printf("a used from unified memory.\n");
//
// map + target with close: 'close' forces a discrete device allocation,
// so the device address differs from the host address.
//
device_alloc = 0;
#pragma omp target data map(close, tofrom : a[ : N]) map(tofrom : device_alloc)
{
#pragma omp target map(tofrom : device_alloc)
{ device_alloc = &a[0]; }
}
// CHECK: a copied to device.
if (device_alloc != host_alloc)
printf("a copied to device.\n");
//
// map + use_device_ptr no close: use_device_ptr yields the host pointer
// when the data lives in unified memory.
//
device_alloc = 0;
#pragma omp target data map(tofrom : a[ : N]) use_device_ptr(a)
{ device_alloc = &a[0]; }
// CHECK: a used from unified memory with use_device_ptr.
if (device_alloc == host_alloc)
printf("a used from unified memory with use_device_ptr.\n");
//
// map + use_device_ptr close: with a discrete copy, use_device_ptr yields
// the device-side pointer.
//
device_alloc = 0;
#pragma omp target data map(close, tofrom : a[ : N]) use_device_ptr(a)
{ device_alloc = &a[0]; }
// CHECK: a used from device memory with use_device_ptr.
if (device_alloc != host_alloc)
printf("a used from device memory with use_device_ptr.\n");
//
// map enter/exit + close: the enter directive uses 'close', so the target
// region below operates on a discrete device copy.
//
device_alloc = 0;
#pragma omp target enter data map(close, to : a[ : N])
#pragma omp target map(from : device_alloc)
{
device_alloc = &a[0];
a[0] = 99;
}
// 'close' is missing on the exit directive, so the runtime must check
// whether 'a' is actually in shared memory in order to determine whether to
// transfer data back and delete the device allocation.
#pragma omp target exit data map(from : a[ : N])
// CHECK: a has been mapped to the device.
if (device_alloc != host_alloc)
printf("a has been mapped to the device.\n")
;
// CHECK: a[0]=99
// CHECK: a is present: 0
printf("a[0]=%d\n", a[0]);
printf("a is present: %d\n", omp_target_is_present(a, dev));
free(a);
// CHECK: Done!
printf("Done!\n");
return 0;
}
|
strsm.c | #include "blas.h"
#include "error.h"
#include <stdio.h>
#include "handle.h"
#include "config.h"
#include "strsm.fatbin.c"
/* Return the smaller of two sizes. */
static inline size_t min(size_t a, size_t b)
{
if (a < b)
return a;
return b;
}
/* Return the larger of two sizes. */
static inline size_t max(size_t a, size_t b)
{
if (a < b)
return b;
return a;
}
/*
 * Asynchronously copy an m x n sub-block of the host matrix B (leading
 * dimension ldb, starting at element (bi,bj)) into the device matrix A
 * (leading dimension lda, starting at element (ai,aj)).  All sizes are in
 * elements; elemSize converts to bytes.
 *
 * Fixes: the call contained a mojibake character ('©') where the address of
 * the local descriptor was intended; restored to &copy.
 *
 * The CUDA_MEMCPY2D descriptor is filled positionally, assumed to match
 * src{XInBytes,Y,MemoryType,Host,Device,Array,Pitch}, the dst equivalents,
 * then WidthInBytes and Height -- NOTE(review): confirm against cuda.h;
 * designated initializers would be more robust.
 */
static inline CUresult cuMemcpyHtoD2DAsync(CUdeviceptr A, size_t lda, size_t ai, size_t aj,
const void * B, size_t ldb, size_t bi, size_t bj,
size_t m, size_t n, size_t elemSize, CUstream stream) {
CUDA_MEMCPY2D copy = {
bi * elemSize, bj, CU_MEMORYTYPE_HOST, B, 0, 0, ldb * elemSize,
ai * elemSize, aj, CU_MEMORYTYPE_DEVICE, NULL, A, 0, lda * elemSize,
m * elemSize, n };
return cuMemcpy2DAsync(&copy, stream);
}
/*
 * Asynchronously copy an m x n sub-block of the device matrix B (leading
 * dimension ldb, starting at element (bi,bj)) into the host matrix A
 * (leading dimension lda, starting at element (ai,aj)).  All sizes are in
 * elements; elemSize converts to bytes.
 *
 * Fixes: the call contained a mojibake character ('©') where the address of
 * the local descriptor was intended; restored to &copy.
 *
 * The CUDA_MEMCPY2D descriptor is filled positionally (see the HtoD helper
 * above for the assumed field order) -- NOTE(review): confirm against
 * cuda.h; designated initializers would be more robust.
 */
static inline CUresult cuMemcpyDtoH2DAsync(void * A, size_t lda, size_t ai, size_t aj,
CUdeviceptr B, size_t ldb, size_t bi, size_t bj,
size_t m, size_t n, size_t elemSize, CUstream stream) {
CUDA_MEMCPY2D copy = {
bi * elemSize, bj, CU_MEMORYTYPE_DEVICE, NULL, B, 0, ldb * elemSize,
ai * elemSize, aj, CU_MEMORYTYPE_HOST, A, 0, 0, lda * elemSize,
m * elemSize, n };
return cuMemcpy2DAsync(&copy, stream);
}
static const float zero = 0.0f;
static const float one = 1.0f;
/*
 * Host reference implementation of single-precision triangular solve with
 * multiple right-hand sides (BLAS STRSM):
 *   side == CBlasLeft :  solves op(A) * X = alpha * B
 *   side == CBlasRight:  solves X * op(A) = alpha * B
 * where op(A) = A or A^T, A is triangular (upper/lower, unit or non-unit
 * diagonal), and the solution X overwrites B in place.  Matrices are
 * column-major: A has leading dimension lda, B has leading dimension ldb.
 * Error codes 9 (lda) and 11 (ldb) follow the reference BLAS argument
 * numbering.  The left-side cases parallelize over independent columns of B
 * with OpenMP; the right-side cases carry dependencies between columns and
 * run sequentially.
 */
void strsm(CBlasSide side, CBlasUplo uplo, CBlasTranspose transA, CBlasDiag diag,
size_t m, size_t n,
float alpha, const float * restrict A, size_t lda,
float * restrict B, size_t ldb) {
const size_t nRowA = (side == CBlasLeft) ? m : n;
int info = 0;
if (lda < nRowA)
info = 9;
else if (ldb < m)
info = 11;
if (info != 0) {
XERBLA(info);
return;
}
/* Quick return: empty problem. */
if (m == 0 || n == 0)
return;
/* alpha == 0 short-circuits to B = 0 (no solve needed). */
if (alpha == zero) {
#pragma omp parallel for
for (size_t j = 0; j < n; j++) {
for (size_t i = 0; i < m; i++)
B[j * ldb + i] = zero;
}
return;
}
if (side == CBlasLeft) {
if (transA == CBlasNoTrans) {
if (uplo == CBlasUpper) {
/* Left, upper, no-transpose: backward substitution, columns of B are
** independent.  The do/while with k-- handles the unsigned index
** counting down to (and including) 0. */
#pragma omp parallel for
for (size_t j = 0; j < n; j++) {
if (alpha != one) {
for (size_t i = 0; i < m; i++)
B[j * ldb + i] *= alpha;
}
size_t k = m - 1;
do {
if (B[j * ldb + k] != zero) {
if (diag == CBlasNonUnit) B[j * ldb + k] /= A[k * lda + k];
register float temp = B[j * ldb + k];
for (size_t i = 0; i < k; i++)
B[j * ldb + i] -= temp * A[k * lda + i];
}
} while (k-- > 0);
}
}
else {
/* Left, lower, no-transpose: forward substitution. */
#pragma omp parallel for
for (size_t j = 0; j < n; j++) {
if (alpha != one) {
for (size_t i = 0; i < m; i++)
B[j * ldb + i] *= alpha;
}
for (size_t k = 0; k < m; k++) {
if (B[j * ldb + k] != zero) {
if (diag == CBlasNonUnit) B[j * ldb + k] /= A[k * lda + k];
register float temp = B[j * ldb + k];
for (size_t i = k + 1; i < m; i++)
B[j * ldb + i] -= temp * A[k * lda + i];
}
}
}
}
}
else {
if (uplo == CBlasUpper) {
/* Left, upper, transpose: dot-product form, rows solved top-down. */
#pragma omp parallel for
for (size_t j = 0; j < n; j++) {
for (size_t i = 0; i < m; i++) {
register float temp = alpha * B[j * ldb + i];
for (size_t k = 0; k < i; k++)
temp -= A[i * lda + k] * B[j * ldb + k];
if (diag == CBlasNonUnit) temp /= A[i * lda + i];
B[j * ldb + i] = temp;
}
}
}
else {
/* Left, lower, transpose: dot-product form, rows solved bottom-up. */
#pragma omp parallel for
for (size_t j = 0; j < n; j++) {
size_t i = m - 1;
do {
register float temp = alpha * B[j * ldb + i];
for (size_t k = i + 1; k < m; k++)
temp -= A[i * lda + k] * B[j * ldb + k];
if (diag == CBlasNonUnit) temp /= A[i * lda + i];
B[j * ldb + i] = temp;
} while (i-- > 0);
}
}
}
}
else {
if (transA == CBlasNoTrans) {
if (uplo == CBlasUpper) {
/* Right, upper, no-transpose: columns solved left-to-right; each
** column depends on earlier columns, so no parallel for. */
for (size_t j = 0; j < n; j++) {
if (alpha != one) {
for (size_t i = 0; i < m; i++)
B[j * ldb + i] *= alpha;
}
for (size_t k = 0; k < j; k++) {
if (A[j * lda + k] != zero) {
register float temp = A[j * lda + k];
for (size_t i = 0; i < m; i++)
B[j * ldb + i] -= temp * B[k * ldb + i];
}
}
if (diag == CBlasNonUnit) {
register float temp = one / A[j * lda + j];
for (size_t i = 0; i < m; i++)
B[j * ldb + i] *= temp;
}
}
}
else {
/* Right, lower, no-transpose: columns solved right-to-left. */
size_t j = n - 1;
do {
if (alpha != one) {
for (size_t i = 0; i < m; i++)
B[j * ldb + i] *= alpha;
}
for (size_t k = j + 1; k < n; k++) {
if (A[j * lda + k] != zero) {
register float temp = A[j * lda + k];
for (size_t i = 0; i < m; i++)
B[j * ldb + i] -= temp * B[k * ldb + i];
}
}
if (diag == CBlasNonUnit) {
register float temp = one / A[j * lda + j];
for (size_t i = 0; i < m; i++)
B[j * ldb + i] *= temp;
}
} while (j-- > 0);
}
}
else {
if (uplo == CBlasUpper) {
/* Right, upper, transpose: columns updated right-to-left. */
size_t k = n - 1;
do {
if (diag == CBlasNonUnit) {
register float temp = one / A[k * lda + k];
for (size_t i = 0; i < m; i++)
B[k * ldb + i] *= temp;
}
for (size_t j = 0; j < k; j++) {
if (A[k * lda + j] != zero) {
register float temp = A[k * lda + j];
for (size_t i = 0; i < m; i++)
B[j * ldb + i] -= temp * B[k * ldb + i];
}
}
if (alpha != one) {
for (size_t i = 0; i < m; i++)
B[k * ldb + i] *= alpha;
}
} while (k-- > 0);
}
else {
/* Right, lower, transpose: columns updated left-to-right. */
for (size_t k = 0; k < n; k++) {
if (diag == CBlasNonUnit) {
register float temp = one / A[k * lda + k];
for (size_t i = 0; i < m; i++)
B[k * ldb + i] *= temp;
}
for (size_t j = k + 1; j < n; j++) {
if (A[k * lda + j] != zero) {
register float temp = A[k * lda + j];
for (size_t i = 0; i < m; i++)
B[j * ldb + i] -= temp * B[k * ldb + i];
}
}
if (alpha != one) {
for (size_t i = 0; i < m; i++)
B[k * ldb + i] *= alpha;
}
}
}
}
}
}
/*
 * Single-GPU STRSM: launches one triangular-solve kernel on the device.
 * A and B are device pointers (column-major, leading dimensions lda/ldb);
 * the solve overwrites B.  The kernel module is loaded lazily into the
 * handle on first use, and the launch is asynchronous on 'stream'.
 * Returns CUDA_ERROR_INVALID_VALUE after XERBLA on bad lda/ldb.
 */
CUresult cuStrsm(CUBLAShandle handle,
CBlasSide side, CBlasUplo uplo, CBlasTranspose transA, CBlasDiag diag,
size_t m, size_t n,
float alpha, CUdeviceptr A, size_t lda,
CUdeviceptr B, size_t ldb, CUstream stream) {
const size_t nRowA = (side == CBlasLeft) ? m : n;
int info = 0;
if (lda < nRowA)
info = 9;
else if (ldb < m)
info = 11;
if (info != 0) {
XERBLA(info);
return CUDA_ERROR_INVALID_VALUE;
}
if (m == 0 || n == 0)
return CUDA_SUCCESS;
CU_ERROR_CHECK(cuCtxPushCurrent(handle->context));
/* Lazily load the fatbin module the first time this handle does an strsm. */
if (handle->strsm == NULL)
CU_ERROR_CHECK(cuModuleLoadData(&handle->strsm, imageBytes));
/* Thread-block shape (bx x by) and tile sizes: left-side solves use tall
** 8 x 64 tiles, right-side solves 64 x 8. */
const unsigned int bx = 8;
const unsigned int by = 8;
const unsigned int mb = (side == CBlasLeft) ? 8 : 64;
const unsigned int nb = (side == CBlasLeft) ? 64 : 8;
/* Build the pre-mangled symbol of the C++ kernel template instantiation
** selected by the runtime parameters (side/uplo/transA/diag/tile sizes). */
char name[102];
snprintf(name, 102,
"_Z5strsmIL9CBlasSide%dEL9CBlasUplo%dEL14CBlasTranspose%dEL9CBlasDiag%dELj%uELj%uELj%uELj%uEEvPKfPffiiii",
side, uplo, transA, diag, mb, nb, bx, by);
CUfunction function;
CU_ERROR_CHECK(cuModuleGetFunction(&function, handle->strsm, name));
void * params[] = { &A, &B, &alpha, &lda, &ldb, &m, &n };
/* The grid is 1-D along the dimension not being solved: left-side kernels
** tile over columns of B (gy), right-side kernels over rows (gx). */
const unsigned int gx = (side == CBlasLeft) ? 1 : (unsigned int)(m + mb - 1) / mb;
const unsigned int gy = (side == CBlasLeft) ? (unsigned int)(n + nb - 1) / nb : 1;
CU_ERROR_CHECK(cuLaunchKernel(function, gx, gy, 1, bx, by, 1, 0, stream, params, NULL));
CU_ERROR_CHECK(cuCtxPopCurrent(&handle->context));
return CUDA_SUCCESS;
}
/*
 * Hybrid multi-GPU STRSM using a blocked algorithm: for each diagonal block
 * of A, the trailing (or leading) update is performed as a GEMM distributed
 * across the GPUs (cuMultiGPUSgemm), then the small triangular solve against
 * the diagonal block is done on the host with the reference strsm above.
 * A and B are host pointers here; B is overwritten with the solution.
 * Block sizes mb/nb come from the SGEMM tuning constants.
 */
CUresult cuMultiGPUStrsm(CUmultiGPUBLAShandle handle,
CBlasSide side, CBlasUplo uplo, CBlasTranspose transA, CBlasDiag diag,
size_t m, size_t n,
float alpha, const float * restrict A, size_t lda,
float * restrict B, size_t ldb) {
const size_t nRowA = (side == CBlasLeft) ? m : n;
int info = 0;
if (lda < nRowA)
info = 9;
else if (ldb < m)
info = 11;
if (info != 0) {
XERBLA(info);
return CUDA_ERROR_INVALID_VALUE;
}
if (m == 0 || n == 0)
return CUDA_SUCCESS;
/* alpha == 0: a k == 0 GEMM with beta == 0 degenerates to clearing B. */
if (alpha == zero) {
sgemm(CBlasNoTrans, CBlasNoTrans, m, n, 0, zero, A, lda, B, ldb, zero, B, ldb);
return CUDA_SUCCESS;
}
const size_t mb = (transA == CBlasNoTrans) ? SGEMM_N_MB : SGEMM_T_MB;
const size_t nb = SGEMM_N_NB;
if (side == CBlasLeft) {
if (transA == CBlasNoTrans) {
if (uplo == CBlasUpper) {
/* Upper, no-transpose: sweep row blocks bottom-up; i starts at m
** rounded up to a multiple of mb so the first iteration handles the
** (possibly partial) last block. */
size_t r = m % mb;
size_t i = (r == 0) ? m : m + mb - r;
do {
i -= mb;
const size_t ib = min(mb, m - i);
CU_ERROR_CHECK(cuMultiGPUSgemm(handle, CBlasNoTrans, CBlasNoTrans, ib, n, m - i - ib, -one, &A[(i + ib) * lda + i], lda, &B[i + ib], ldb, alpha, &B[i], ldb));
/* GEMM runs asynchronously across GPUs; synchronize before the host
** solve reads the updated block of B. */
CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
strsm(CBlasLeft, CBlasUpper, CBlasNoTrans, diag, ib, n, one, &A[i * lda + i], lda, &B[i], ldb);
} while (i > 0);
}
else {
/* Lower, no-transpose: sweep row blocks top-down. */
for (size_t i = 0; i < m; i += mb) {
const size_t ib = min(mb, m - i);
CU_ERROR_CHECK(cuMultiGPUSgemm(handle, CBlasNoTrans, CBlasNoTrans, ib, n, i, -one, &A[i], lda, B, ldb, alpha, &B[i], ldb));
CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
strsm(CBlasLeft, CBlasLower, CBlasNoTrans, diag, ib, n, one, &A[i * lda + i], lda, &B[i], ldb);
}
}
}
else {
if (uplo == CBlasUpper) {
/* Upper, transpose: sweep row blocks top-down. */
for (size_t i = 0; i < m; i += mb) {
const size_t ib = min(mb, m - i);
CU_ERROR_CHECK(cuMultiGPUSgemm(handle, CBlasTrans, CBlasNoTrans, ib, n, i, -one, &A[i * lda], lda, B, ldb, alpha, &B[i], ldb));
CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
strsm(CBlasLeft, CBlasUpper, CBlasTrans, diag, ib, n, one, &A[i * lda + i], lda, &B[i], ldb);
}
}
else {
/* Lower, transpose: sweep row blocks bottom-up. */
size_t r = m % mb;
size_t i = (r == 0) ? m : m + mb - r;
do {
i -= mb;
const size_t ib = min(mb, m - i);
CU_ERROR_CHECK(cuMultiGPUSgemm(handle, CBlasTrans, CBlasNoTrans, ib, n, m - i - ib, -one, &A[i * lda + i + ib], lda, &B[i + ib], ldb, alpha, &B[i], ldb));
CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
strsm(CBlasLeft, CBlasLower, CBlasTrans, diag, ib, n, one, &A[i * lda + i], lda, &B[i], ldb);
} while (i > 0);
}
}
}
else {
if (transA == CBlasNoTrans) {
if (uplo == CBlasUpper) {
/* Right, upper, no-transpose: sweep column blocks left-to-right. */
for (size_t j = 0; j < n; j += nb) {
const size_t jb = min(nb, n - j);
CU_ERROR_CHECK(cuMultiGPUSgemm(handle, CBlasNoTrans, CBlasNoTrans, m, jb, j, -one, B, ldb, &A[j * lda], lda, alpha, &B[j * ldb], ldb));
CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
strsm(CBlasRight, CBlasUpper, CBlasNoTrans, diag, m, jb, one, &A[j * lda + j], lda, &B[j * ldb], ldb);
}
}
else {
/* Right, lower, no-transpose: sweep column blocks right-to-left. */
size_t r = n % nb;
size_t j = (r == 0) ? n : n + nb - r;
do {
j -= nb;
const size_t jb = min(nb, n - j);
CU_ERROR_CHECK(cuMultiGPUSgemm(handle, CBlasNoTrans, CBlasNoTrans, m, jb, n - j - jb, -one, &B[(j + jb) * ldb], ldb, &A[j * lda + j + jb], lda, alpha, &B[j * ldb], ldb));
CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
strsm(CBlasRight, CBlasLower, CBlasNoTrans, diag, m, jb, one, &A[j * lda + j], lda, &B[j * ldb], ldb);
} while (j > 0);
}
}
else {
if (uplo == CBlasUpper) {
/* Right, upper, transpose: sweep column blocks right-to-left. */
size_t r = n % nb;
size_t j = (r == 0) ? n : n + nb - r;
do {
j -= nb;
const size_t jb = min(nb, n - j);
CU_ERROR_CHECK(cuMultiGPUSgemm(handle, CBlasNoTrans, CBlasTrans, m, jb, n - j - jb, -one, &B[(j + jb) * ldb], ldb, &A[(j + jb) * lda + j], lda, alpha, &B[j * ldb], ldb));
CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
strsm(CBlasRight, CBlasUpper, CBlasTrans, diag, m, jb, one, &A[j * lda + j], lda, &B[j * ldb], ldb);
} while (j > 0);
}
else {
/* Right, lower, transpose: sweep column blocks left-to-right. */
for (size_t j = 0; j < n; j += nb) {
const size_t jb = min(nb, n - j);
CU_ERROR_CHECK(cuMultiGPUSgemm(handle, CBlasNoTrans, CBlasTrans, m, jb, j, -one, B, ldb, &A[j], lda, alpha, &B[j * ldb], ldb));
CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
strsm(CBlasRight, CBlasLower, CBlasTrans, diag, m, jb, one, &A[j * lda + j], lda, &B[j * ldb], ldb);
}
}
}
}
return CUDA_SUCCESS;
}
|
GB_binop__rdiv_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__rdiv_uint8
// A.*B function (eWiseMult): GB_AemultB__rdiv_uint8
// A*D function (colscale): GB_AxD__rdiv_uint8
// D*A function (rowscale): GB_DxB__rdiv_uint8
// C+=B function (dense accum): GB_Cdense_accumB__rdiv_uint8
// C+=b function (dense accum): GB_Cdense_accumb__rdiv_uint8
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__rdiv_uint8
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__rdiv_uint8
// C=scalar+B GB_bind1st__rdiv_uint8
// C=scalar+B' GB_bind1st_tran__rdiv_uint8
// C=A+scalar GB_bind2nd__rdiv_uint8
// C=A'+scalar GB_bind2nd_tran__rdiv_uint8
// C type: uint8_t
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = GB_IDIV_UNSIGNED (bij, aij, 8)
// These macros parameterize the generic template files #include'd by the
// kernels below: they fix the operand types (all uint8_t here) and the RDIV
// operator for this specialization.
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator: RDIV is DIV with the operands reversed (z = y / x),
// using GraphBLAS unsigned integer-division semantics for 8-bit values.
#define GB_BINOP(z, x, y) \
z = GB_IDIV_UNSIGNED (y, x, 8) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RDIV || GxB_NO_UINT8 || GxB_NO_RDIV_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A and B are all dense; the work is done by the shared
// accumulation template using the GB_* macros defined above.  Note this
// kernel returns void and carries no GB_DISABLE guard (see the dispatch
// restriction in the comment above).
void GB_Cdense_ewise3_accum__rdiv_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A and B are all dense, with no accumulator.  Returns
// GrB_NO_VALUE when this operator/type combination is compiled out
// (GB_DISABLE), so the caller falls back to the generic kernel.
GrB_Info GB_Cdense_ewise3_noaccum__rdiv_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B, accumulating a sparse matrix B into the dense matrix C.  The
// kfirst/klast/pstart slice arrays describe the per-task partition of B's
// entries (presumably produced by GB_ek_slice -- see GB_ek_slice.h); ntasks
// and nthreads control the parallel template.  Returns GrB_NO_VALUE when
// this specialization is compiled out.
GrB_Info GB_Cdense_accumB__rdiv_uint8
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b, accumulating the scalar b (passed type-erased via p_bwork) into
// the dense matrix C.  Returns GrB_NO_VALUE when this specialization is
// compiled out (GB_DISABLE), so the caller falls back to the generic kernel.
//
// Fixes: the generated code returned GrB_SUCCESS both inside the inner block
// and again after it, leaving the second return unreachable dead code; the
// inner return is removed so control falls through to the single return.
GrB_Info GB_Cdense_accumb__rdiv_uint8
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, column-scaling A by the diagonal matrix D.  Cx aliases C's value
// array; the slice arrays partition A's entries across ntasks tasks for the
// colscale template.  The *_is_pattern flags tell the template to ignore the
// corresponding values.  Returns GrB_NO_VALUE when compiled out.
GrB_Info GB_AxD__rdiv_uint8
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row-scale B by the diagonal matrix D.
GrB_Info GB_DxB__rdiv_uint8
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B (union of patterns); the C_to_* maps
// and TaskList describe the precomputed parallel schedule.
GrB_Info GB_AaddB__rdiv_uint8
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B (intersection of patterns).
GrB_Info GB_AemultB__rdiv_uint8
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): bind the scalar x as the first operand.  For rdiv
// (z = y/x) each entry becomes bij divided by x, with the division
// guarded by GB_IDIV_UNSIGNED (defined elsewhere; presumably protects
// against divide-by-zero — confirm against GB_binary_search/GB.h).
GrB_Info GB_bind1st__rdiv_uint8
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint8_t bij = Bx [p] ;
Cx [p] = GB_IDIV_UNSIGNED (bij, x, 8) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): bind the scalar y as the second operand.  For rdiv
// (z = y/x) each entry becomes y divided by aij.
GrB_Info GB_bind2nd__rdiv_uint8
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint8_t aij = Ax [p] ;
Cx [p] = GB_IDIV_UNSIGNED (y, aij, 8) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c below; it applies the
// rdiv operator with the bound scalar x while transposing A into C.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = GB_IDIV_UNSIGNED (aij, x, 8) ; \
}
// C = op (x, A'): transpose A and apply the operator with x bound first.
GrB_Info GB_bind1st_tran__rdiv_uint8
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows this function
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
// GB_CAST_OP for the bind-2nd transpose: apply rdiv with the bound
// scalar y while transposing A into C.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = GB_IDIV_UNSIGNED (y, aij, 8) ; \
}
// C = op (A', y): transpose A and apply the operator with y bound second.
GrB_Info GB_bind2nd_tran__rdiv_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
l1_normMEX_256.c | #include "mex.h"
#include <omp.h>
#include <math.h>
#include <emmintrin.h>
#include <xmmintrin.h>
#include <immintrin.h>
void mexFunction(int nlhs, mxArray *left[], int nrhs, const mxArray *right[]) {
/* Smoothed L1 norm of X1 + T*X2:
 *   Y = sum_i sqrt(|X1[i] + T*X2[i]|^2 + S)
 * right[0] = X1, right[1] = X2 (real or complex, single or double),
 * right[2] = scalar step T (real), right[3] = smoothing factor S (real).
 * left[0] = Y, a 1x1 array with the same precision as X1/X2.
 *
 * Fixes vs. the previous revision:
 *  - L1/L1f were consumed by "reduction(+:...)" and "+=" while
 *    uninitialized (undefined behavior); both are now zeroed first;
 *  - the OpenMP "+" reduction is only defined for arithmetic types,
 *    so the __m256d/__m256 accumulators now use user-declared
 *    reductions (OpenMP 4.0);
 *  - mxArray data is not guaranteed to be 32-byte aligned, so the
 *    aligned AVX load/store intrinsics were replaced by loadu/storeu;
 *  - the single-precision paths now use sqrtf() and the float
 *    accumulator L1f (the real single path accumulated into the
 *    uninitialized double L1).
 */
#pragma omp declare reduction(vsum_pd : __m256d : omp_out = _mm256_add_pd(omp_out, omp_in)) initializer(omp_priv = _mm256_setzero_pd())
#pragma omp declare reduction(vsum_ps : __m256 : omp_out = _mm256_add_ps(omp_out, omp_in)) initializer(omp_priv = _mm256_setzero_ps())
/* Declare variables */
mwSize elem, cmplx, cmplx1, cmplx2, cmplx3;
long long i, elem2;
const mwSize size[]={1,1};
mxClassID precision, precision1;
mxArray *Y;
double *pX1r, *pX1i, *pX2r, *pX2i, *pYr, *pT, *pSd, Td, Sd;
double xr, xi, L1, dL1[4];
__m256d vTd, vSd, vL1, vxr, vxi;
float *pX1rf, *pX1if, *pX2rf, *pX2if, *pYrf, *pTf, *pSf, Tf, Sf;
float xrf, xif, L1f, dL1f[8];
__m256 vTf, vSf, vL1f, vxrf, vxif;
/* Basic argument check */
if (nrhs < 4)
mexErrMsgTxt("Four inputs required: X1, X2, T, S");
/* Get number of elements */
elem = mxGetNumberOfElements(right[0]);
/* Test for complex and obtain data class */
cmplx = mxIsComplex(right[0]);
cmplx1 = mxIsComplex(right[1]);
cmplx2 = mxIsComplex(right[2]);
cmplx3 = mxIsComplex(right[3]);
if (cmplx != cmplx1)
mexErrMsgTxt("Inputs 0 and 1 have different complexity");
if (cmplx2)
mexErrMsgTxt("Input 2 is complex (must be real)");
if (cmplx3)
mexErrMsgTxt("Input 3 is complex (must be real)");
/* Obtain and test data class */
precision = mxGetClassID(right[0]);
precision1 = mxGetClassID(right[1]);
if (precision != precision1)
mexErrMsgTxt("Inputs 0 and 1 have different precision");
/* Get pointers to input arrays and create the 1x1 output array */
Y = mxCreateNumericArray(2, size, precision, mxREAL);
if (precision == mxDOUBLE_CLASS) {
pX1r = mxGetPr(right[0]);
pX2r = mxGetPr(right[1]);
if (cmplx) {
pX1i = mxGetPi(right[0]);
pX2i = mxGetPi(right[1]);
}
pYr = mxGetPr(Y);
}
else {
pX1rf = mxGetData(right[0]);
pX2rf = mxGetData(right[1]);
if (cmplx) {
pX1if = mxGetImagData(right[0]);
pX2if = mxGetImagData(right[1]);
}
pYrf = mxGetData(Y);
}
/* Get pointer to input scalar */
if (mxGetClassID(right[2]) == mxDOUBLE_CLASS)
pT = mxGetData(right[2]);
else
pTf = mxGetData(right[2]);
/* Get pointer to smoothing factor */
if (mxGetClassID(right[3]) == mxDOUBLE_CLASS)
pSd = mxGetData(right[3]);
else
pSf = mxGetData(right[3]);
/* Convert scalars to the same data type as the input arrays */
if (precision == mxDOUBLE_CLASS) {
Td = (mxGetClassID(right[2]) == mxDOUBLE_CLASS) ? pT[0] : (double)pTf[0];
Sd = (mxGetClassID(right[3]) == mxDOUBLE_CLASS) ? pSd[0] : (double)pSf[0];
}
else {
Tf = (mxGetClassID(right[2]) == mxDOUBLE_CLASS) ? (float)pT[0] : pTf[0];
Sf = (mxGetClassID(right[3]) == mxDOUBLE_CLASS) ? (float)pSd[0] : pSf[0];
}
/* NOTE(review): thread count is hard-coded; consider honoring
 * OMP_NUM_THREADS or taking it as an optional input instead. */
omp_set_num_threads(16);
/* Loop through, compute |x1 + T*x2| (smoothed), and sum */
if (precision == mxDOUBLE_CLASS) {
L1 = 0.;
if (cmplx) {
/* Number of elements handled by the 4-wide SIMD loop */
elem2 = (elem/4)*4;
vTd = _mm256_set1_pd(Td);
vSd = _mm256_set1_pd(Sd);
vL1 = _mm256_setzero_pd();
#pragma omp parallel for private(vxr,vxi) reduction(vsum_pd: vL1)
for (i=0; i<elem2; i+=4) {
vxr = _mm256_add_pd(_mm256_loadu_pd(pX1r+i),_mm256_mul_pd(vTd,_mm256_loadu_pd(pX2r+i)));
vxr = _mm256_mul_pd(vxr,vxr);
vxi = _mm256_add_pd(_mm256_loadu_pd(pX1i+i),_mm256_mul_pd(vTd,_mm256_loadu_pd(pX2i+i)));
vxi = _mm256_mul_pd(vxi,vxi);
vL1 = _mm256_add_pd(vL1,_mm256_sqrt_pd(_mm256_add_pd(_mm256_add_pd(vxr,vxi),vSd)));
}
/* Horizontal sum of the vector accumulator */
_mm256_storeu_pd(dL1,vL1);
L1 = dL1[0] + dL1[1] + dL1[2] + dL1[3];
/* Finish the last few elements */
for (i=elem2; i<(long long)elem; i++) {
xr = pX1r[i] + Td*pX2r[i];
xi = pX1i[i] + Td*pX2i[i];
L1 += sqrt(xr*xr + xi*xi + Sd);
}
}
else {
#pragma omp parallel for private(xr) reduction(+: L1)
for (i=0; i<(long long)elem; i++) {
xr = pX1r[i] + Td*pX2r[i];
L1 += sqrt(xr*xr + Sd);
}
}
pYr[0] = L1;
}
else {
L1f = 0.f;
if (cmplx) {
/* Number of elements handled by the 8-wide SIMD loop */
elem2 = (elem/8)*8;
vTf = _mm256_set1_ps(Tf);
vSf = _mm256_set1_ps(Sf);
vL1f = _mm256_setzero_ps();
#pragma omp parallel for private(vxrf,vxif) reduction(vsum_ps: vL1f)
for (i=0; i<elem2; i+=8) {
vxrf = _mm256_add_ps(_mm256_loadu_ps(pX1rf+i),_mm256_mul_ps(vTf,_mm256_loadu_ps(pX2rf+i)));
vxrf = _mm256_mul_ps(vxrf,vxrf);
vxif = _mm256_add_ps(_mm256_loadu_ps(pX1if+i),_mm256_mul_ps(vTf,_mm256_loadu_ps(pX2if+i)));
vxif = _mm256_mul_ps(vxif,vxif);
vL1f = _mm256_add_ps(vL1f,_mm256_sqrt_ps(_mm256_add_ps(_mm256_add_ps(vxrf,vxif),vSf)));
}
/* Horizontal sum of the vector accumulator */
_mm256_storeu_ps(dL1f,vL1f);
L1f = dL1f[0] + dL1f[1] + dL1f[2] + dL1f[3] + dL1f[4] + dL1f[5] + dL1f[6] + dL1f[7];
/* Finish the last few elements */
for (i=elem2; i<(long long)elem; i++) {
xrf = pX1rf[i] + Tf*pX2rf[i];
xif = pX1if[i] + Tf*pX2if[i];
L1f += sqrtf(xrf*xrf + xif*xif + Sf);
}
}
else {
#pragma omp parallel for private(xrf) reduction(+: L1f)
for (i=0; i<(long long)elem; i++) {
xrf = pX1rf[i] + Tf*pX2rf[i];
L1f += sqrtf(xrf*xrf + Sf);
}
}
pYrf[0] = L1f;
}
/* Return values */
left[0] = Y;
}
|
Euclid_apply.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision: 2.9 $
***********************************************************************EHEADER*/
#include "_hypre_Euclid.h"
/* #include "Euclid_dh.h" */
/* #include "Mat_dh.h" */
/* #include "Factor_dh.h" */
/* #include "Parser_dh.h" */
/* #include "TimeLog_dh.h" */
/* #include "SubdomainGraph_dh.h" */
static void scale_rhs_private(Euclid_dh ctx, double *rhs);
static void permute_vec_n2o_private(Euclid_dh ctx, double *xIN, double *xOUT);
static void permute_vec_o2n_private(Euclid_dh ctx, double *xIN, double *xOUT);
#undef __FUNC__
#define __FUNC__ "Euclid_dhApply"
/* Apply the Euclid preconditioner: given rhs, compute lhs via the
 * stored ILU factors.  The input rhs is never modified.  The vector is
 * permuted (if a subdomain graph exists) and scaled (if the matrix was
 * scaled), the triangular solves run sequentially or in parallel, and
 * the result is permuted back.  Timing counters are updated on exit. */
void Euclid_dhApply(Euclid_dh ctx, double *rhs, double *lhs)
{
START_FUNC_DH
double *rhs_, *lhs_;
double t1, t2;
t1 = hypre_MPI_Wtime();
/* default settings; for everything except PILU */
ctx->from = 0;
ctx->to = ctx->m;
/* case 1: no preconditioning — lhs is just a copy of rhs */
if (! strcmp(ctx->algo_ilu, "none") || ! strcmp(ctx->algo_par, "none")) {
HYPRE_Int i, m = ctx->m;
for (i=0; i<m; ++i) lhs[i] = rhs[i];
goto END_OF_FUNCTION;
}
/*----------------------------------------------------------------
 * permute and scale rhs vector
 *----------------------------------------------------------------*/
/* permute rhs vector; lhs is used as scratch for the permuted copy */
if (ctx->sg != NULL) {
/* hypre_printf("@@@@@@@@@@@@@@@@@ permute_vec_n2o_private\n"); */
permute_vec_n2o_private(ctx, rhs, lhs); CHECK_V_ERROR;
rhs_ = lhs;
lhs_ = ctx->work2;
} else {
rhs_ = rhs;
lhs_ = lhs;
}
/* scale rhs vector (in place on the working copy) */
if (ctx->isScaled) {
/* hypre_printf("@@@@@@@@@@@@@@@@@ scale_rhs_private\n"); */
scale_rhs_private(ctx, rhs_); CHECK_V_ERROR;
}
/* note: rhs_ is permuted, scaled; the input, "rhs" vector has
not been disturbed.
*/
/*----------------------------------------------------------------
 * big switch to choose the appropriate triangular solve
 *----------------------------------------------------------------*/
/* sequential and mpi block jacobi cases */
if (np_dh == 1 ||
! strcmp(ctx->algo_par, "bj") ) {
Factor_dhSolveSeq(rhs_, lhs_, ctx); CHECK_V_ERROR;
}
/* pilu case */
else {
Factor_dhSolve(rhs_, lhs_, ctx); CHECK_V_ERROR;
}
/*----------------------------------------------------------------
 * unpermute lhs vector
 * (note: don't need to unscale, because we were clever)
 *----------------------------------------------------------------*/
if (ctx->sg != NULL) {
permute_vec_o2n_private(ctx, lhs_, lhs); CHECK_V_ERROR;
}
END_OF_FUNCTION: ;
t2 = hypre_MPI_Wtime();
/* collective timing for triangular solves */
ctx->timing[TRI_SOLVE_T] += (t2 - t1);
/* collective timing for setup+krylov+triSolves
(intent is to time linear solve, but this is
at best problematical!)
*/
ctx->timing[TOTAL_SOLVE_TEMP_T] = t2 - ctx->timing[SOLVE_START_T];
/* total triangular solve count */
ctx->its += 1;
ctx->itsTotal += 1;
END_FUNC_DH
}
#undef __FUNC__
#define __FUNC__ "scale_rhs_private"
/* Scale rhs in place by ctx->scale (no-op if the matrix was not
 * scaled).  NOTE(review): the "omp for" below is an orphaned
 * worksharing construct — it only parallelizes when this function is
 * called from inside an active parallel region; otherwise it runs
 * serially.  Presumably intentional — confirm against callers. */
void scale_rhs_private(Euclid_dh ctx, double *rhs)
{
START_FUNC_DH
HYPRE_Int i, m = ctx->m;
REAL_DH *scale = ctx->scale;
/* if matrix was scaled, must scale the rhs */
if (scale != NULL) {
#ifdef USING_OPENMP_DH
#pragma omp for schedule(static)
#endif
for (i=0; i<m; ++i) { rhs[i] *= scale[i]; }
}
END_FUNC_DH
}
#undef __FUNC__
#define __FUNC__ "permute_vec_o2n_private"
/* Gather xIN into xOUT through the subdomain graph's old-to-new
 * column permutation: xOUT[k] = xIN[o2n_col[k]]. */
void permute_vec_o2n_private(Euclid_dh ctx, double *xIN, double *xOUT)
{
START_FUNC_DH
const HYPRE_Int len = ctx->m;
const HYPRE_Int *perm = ctx->sg->o2n_col;
HYPRE_Int k;
for (k = 0; k < len; ++k)
{
xOUT[k] = xIN[perm[k]];
}
END_FUNC_DH
}
#undef __FUNC__
#define __FUNC__ "permute_vec_n2o_private"
/* Gather xIN into xOUT through the subdomain graph's new-to-old
 * row permutation: xOUT[k] = xIN[n2o_row[k]]. */
void permute_vec_n2o_private(Euclid_dh ctx, double *xIN, double *xOUT)
{
START_FUNC_DH
const HYPRE_Int len = ctx->m;
const HYPRE_Int *perm = ctx->sg->n2o_row;
HYPRE_Int k;
for (k = 0; k < len; ++k)
{
xOUT[k] = xIN[perm[k]];
}
END_FUNC_DH
}
|
openmp.c | /*
* $PIP_license: <Simplified BSD License>
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
* $
* $RIKEN_copyright: Riken Center for Computational Sceience (R-CCS),
* System Software Development Team, 2016-2021
* $
* $PIP_TESTSUITE: Version 1.1.0$
*
* $Author: Atsushi Hori (R-CCS) mailto: ahori@riken.jp or ahori@me.com
* $
*/
#include <omp.h>
#include <stdlib.h>
#include <stdio.h>
/* Spawn an OpenMP parallel region and have every thread print a hello
 * line tagged with the PIP_TEST_PIPID environment variable. */
int main( int argc, char **argv ) {
int num_threads, thread_id;
#pragma omp parallel private(num_threads, thread_id)
{
char *pipid = getenv( "PIP_TEST_PIPID" );
/* query the team size and this thread's rank */
num_threads = omp_get_num_threads();
thread_id = omp_get_thread_num();
printf( "[%s] Hello World from OMP thread = %d/%d\n", pipid, thread_id, num_threads );
}
return 0;
}
|
stat_ops_probability.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "stat_ops.h"
#include "utility.h"
#include "constant.h"
// calculate probability with which we obtain 0 at target qubit
// (sum of |amplitude|^2 over all basis states whose target bit is 0)
double M0_prob(UINT target_qubit_index, const CTYPE* state, ITYPE dim) {
const ITYPE loop_dim = dim / 2;
const ITYPE mask = 1ULL << target_qubit_index;
ITYPE state_index;
double sum = 0.;
#ifdef _OPENMP
#pragma omp parallel for reduction(+:sum)
#endif
for (state_index = 0; state_index < loop_dim; ++state_index) {
ITYPE basis_0 = insert_zero_to_basis_index(state_index, mask, target_qubit_index);
// |amp|^2 via a plain multiply: pow(x, 2) calls the generic power
// routine and is needlessly slow in this hot loop
double amp = cabs(state[basis_0]);
sum += amp * amp;
}
return sum;
}
// calculate probability with which we obtain 1 at target qubit
// (sum of |amplitude|^2 over all basis states whose target bit is 1)
double M1_prob(UINT target_qubit_index, const CTYPE* state, ITYPE dim) {
const ITYPE loop_dim = dim / 2;
const ITYPE mask = 1ULL << target_qubit_index;
ITYPE state_index;
double sum = 0.;
#ifdef _OPENMP
#pragma omp parallel for reduction(+:sum)
#endif
for (state_index = 0; state_index < loop_dim; ++state_index) {
// set the target bit to 1 after inserting a zero at its position
ITYPE basis_1 = insert_zero_to_basis_index(state_index, mask, target_qubit_index) ^ mask;
// |amp|^2 via a plain multiply instead of pow(x, 2) (hot loop)
double amp = cabs(state[basis_1]);
sum += amp * amp;
}
return sum;
}
// calculate marginal probability with which we obtain the set of values
// measured_value_list at sorted_target_qubit_index_list
// warning: sorted_target_qubit_index_list must be sorted (ascending).
double marginal_prob(const UINT* sorted_target_qubit_index_list, const UINT* measured_value_list, UINT target_qubit_index_count, const CTYPE* state, ITYPE dim) {
ITYPE loop_dim = dim >> target_qubit_index_count;
ITYPE state_index;
double sum = 0.;
#ifdef _OPENMP
#pragma omp parallel for reduction(+:sum)
#endif
for (state_index = 0; state_index < loop_dim; ++state_index) {
ITYPE basis = state_index;
// rebuild the full basis index by inserting each measured bit at
// its qubit position (insertion order relies on the sorted list)
for (UINT cursor = 0; cursor < target_qubit_index_count; cursor++) {
UINT insert_index = sorted_target_qubit_index_list[cursor];
ITYPE mask = 1ULL << insert_index;
basis = insert_zero_to_basis_index(basis, mask, insert_index);
basis ^= mask * measured_value_list[cursor];
}
// |amp|^2 via a plain multiply instead of pow(x, 2) (hot loop)
double amp = cabs(state[basis]);
sum += amp * amp;
}
return sum;
}
// calculate entropy of probability distribution of Z-basis measurements:
// -sum_i p_i log(p_i) with p_i = |state[i]|^2
double measurement_distribution_entropy(const CTYPE *state, ITYPE dim){
ITYPE index;
double ent=0;
// clamp probabilities away from zero so log() stays finite
const double eps = 1e-15;
#ifdef _OPENMP
#pragma omp parallel for reduction(+:ent)
#endif
for(index = 0; index < dim; ++index){
// |amp|^2 via a plain multiply instead of pow(x, 2) (hot loop)
double amp = cabs(state[index]);
double prob = amp * amp;
prob = (prob > eps)?prob:eps;
ent += -1.0*prob*log(prob);
}
return ent;
}
|
GB_binop__islt_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__islt_int8)
// A.*B function (eWiseMult): GB (_AemultB_01__islt_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__islt_int8)
// A.*B function (eWiseMult): GB (_AemultB_03__islt_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__islt_int8)
// A*D function (colscale): GB (_AxD__islt_int8)
// D*A function (rowscale): GB (_DxB__islt_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__islt_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__islt_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__islt_int8)
// C=scalar+B GB (_bind1st__islt_int8)
// C=scalar+B' GB (_bind1st_tran__islt_int8)
// C=A+scalar GB (_bind2nd__islt_int8)
// C=A'+scalar GB (_bind2nd_tran__islt_int8)
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = (aij < bij)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x < y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISLT || GxB_NO_INT8 || GxB_NO_ISLT_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// ISLT is none of these, so the generator emits this stub disabled;
// it is kept only so the file layout matches the other kernels.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense (op: islt, z = (x<y), int8).
GrB_Info GB (_Cdense_ewise3_noaccum__islt_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, using the
// ek_slicing task schedule computed by the caller.
GrB_Info GB (_Cdense_accumB__islt_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C.
GrB_Info GB (_Cdense_accumb__islt_int8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE: unreachable — the block above already returned (harmless
// artifact of the code generator).
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column-scale A by the diagonal matrix D.
GrB_Info GB (_AxD__islt_int8)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row-scale B by the diagonal matrix D.
GrB_Info GB (_DxB__islt_int8)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B (union of patterns).  Workspace
// declared here is released by GB_FREE_WORK before returning.
GrB_Info GB (_AaddB__islt_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult method 01: C = A.*B or C<M> = A.*B (intersection of patterns).
GrB_Info GB (_AemultB_01__islt_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  GB_BINOP_FLIP is 0 for islt (a flipped variant, isgt,
// exists), so only the unflipped template is compiled here.
GrB_Info GB (_AemultB_02__islt_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 03: C<M> = A.*B with M sparse/hyper and both A and B
// bitmap/full.
GrB_Info GB (_AemultB_03__islt_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap.
GrB_Info GB (_AemultB_bitmap__islt_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): bind the scalar x as the first operand, so each
// entry becomes (x < bij).  Entries absent from the bitmap Bb are
// skipped.
GrB_Info GB (_bind1st__islt_int8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t x = (*((int8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int8_t bij = GBX (Bx, p, false) ;
Cx [p] = (x < bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): bind the scalar y as the second operand, so each
// entry becomes (aij < y).  Entries absent from the bitmap Ab are
// skipped.
GrB_Info GB (_bind2nd__islt_int8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int8_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij < y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c below: it applies
// (x < aij) while transposing A into C.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x < aij) ; \
}
// C = op (x, A'): transpose A and apply the operator with x bound first.
GrB_Info GB (_bind1st_tran__islt_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows this function
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP for the bind-2nd transpose: applies (aij < y) while
// transposing A into C.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij < y) ; \
}
// C = op (A', y): transpose A and apply the operator with y bound second.
GrB_Info GB (_bind2nd_tran__islt_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
omp_parallel_default.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
#include "omp_testsuite.h"
/* Check that default(shared) on a parallel region really shares `sum`:
 * each thread accumulates its slice of 1..LOOPCOUNT into a private
 * partial sum, then folds it into the shared total under a critical
 * section.  Returns nonzero on success. */
int test_omp_parallel_default()
{
  int i;
  int partial;
  int sum = 0;
  /* Gauss formula: expected sum of 1..LOOPCOUNT */
  int known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
  #pragma omp parallel default(shared) private(i) private(partial)
  {
    partial = 0;
    #pragma omp for
    for (i = 1; i <= LOOPCOUNT; i++)
      partial += i;
    /* serialize updates of the shared accumulator */
    #pragma omp critical
    sum += partial;
  }
  if (known_sum != sum) {
    fprintf(stderr, "KNOWN_SUM = %d; SUM = %d\n", known_sum, sum);
  }
  return (known_sum == sum);
}
/* Driver: repeat the test REPETITIONS times; exit code is the failure count. */
int main()
{
  int rep;
  int failures = 0;
  for (rep = 0; rep < REPETITIONS; rep++)
    if (!test_omp_parallel_default())
      failures++;
  return failures;
}
|
GB_unaryop__lnot_uint16_uint32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint16_uint32
// op(A') function: GB_tran__lnot_uint16_uint32
// C type: uint16_t
// A type: uint32_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
uint16_t z = (uint16_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT16 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = lnot ((uint16_t) Ax [p]) for all p, in parallel.
// GB_CAST_OP (defined above) performs the cast uint32->uint16 followed by
// cij = !(aij != 0).  Generated code (do not hand-edit); returns
// GrB_NO_VALUE when this operator/type combination is compiled out.
GrB_Info GB_unop__lnot_uint16_uint32
(
uint16_t *restrict Cx,
const uint32_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// dense apply: one entry per iteration, statically scheduled
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = lnot (cast (A')): the actual transpose loop lives in the included
// template GB_unaryop_transpose.c, driven by the GB_* macros defined above.
// Generated code (do not hand-edit); returns GrB_NO_VALUE when compiled out.
GrB_Info GB_tran__lnot_uint16_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// only phase 2 (the numeric phase) of the transpose template is needed here
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
alignblt.c | /********************************************************************[libaroma]*
* Copyright (C) 2011-2015 Ahmad Amarullah (http://amarullz.com/)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*______________________________________________________________________________
*
* Filename : alignblt.c
* Description : align blit
*
* + This is part of libaroma, an embedded ui toolkit.
* + 06/04/15 - Author(s): Ahmad Amarullah
*
*/
#ifndef __libaroma_alignblt_c__
#define __libaroma_alignblt_c__
#include <aroma_internal.h>
/* Row-by-row copy of a 16bpp surface.  Strides are the extra bytes that
 * pad each row beyond the w visible pixels. */
void libaroma_blt_align16(wordp __restrict dst, wordp __restrict src,
int w, int h, int dst_stride, int src_stride) {
  int row_bytes = w << 1;               /* 2 bytes per 16bpp pixel */
  int dpitch = row_bytes + dst_stride;  /* full destination row pitch */
  int spitch = row_bytes + src_stride;  /* full source row pitch */
  bytep dbytes = (bytep) dst;
  bytep sbytes = (bytep) src;
  int y;
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
  for (y = 0; y < h; y++) {
    memcpy(dbytes + dpitch * y, sbytes + spitch * y, row_bytes);
  }
}
/* Convert a 32bpp surface to 16bpp, dithering each row.  Strides are
 * byte paddings: >>1 converts to 16bpp pixels, >>2 to 32bpp pixels. */
void libaroma_blt_align32_to16(wordp __restrict dst, dwordp __restrict src,
int w, int h, int dst_stride, int src_stride) {
  int dpitch = w + (dst_stride >> 1);
  int spitch = w + (src_stride >> 2);
  int y;
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
  for (y = 0; y < h; y++) {
    /* row index is also passed so the dither pattern varies per line */
    libaroma_dither_line(y, w, dst + dpitch * y, src + spitch * y);
  }
}
/* Expand a 16bpp surface to 32bpp, one row at a time. */
void libaroma_blt_align16_to32(dwordp __restrict dst, wordp __restrict src,
int w, int h, int dst_stride, int src_stride) {
  int dpitch = w + (dst_stride >> 2);  /* stride bytes -> 32bpp pixels */
  int spitch = w + (src_stride >> 1);  /* stride bytes -> 16bpp pixels */
  int y;
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
  for (y = 0; y < h; y++) {
    libaroma_btl32(w, dst + dpitch * y, src + spitch * y);
  }
}
/* Row-by-row copy of a 32bpp surface.  Strides are the extra bytes that
 * pad each row beyond the w visible pixels. */
void libaroma_blt_align32(dwordp __restrict dst, dwordp __restrict src,
int w, int h, int dst_stride, int src_stride) {
  int row_bytes = w << 2;               /* 4 bytes per 32bpp pixel */
  int dpitch = row_bytes + dst_stride;  /* full destination row pitch */
  int spitch = row_bytes + src_stride;  /* full source row pitch */
  bytep dbytes = (bytep) dst;
  bytep sbytes = (bytep) src;
  int y;
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
  for (y = 0; y < h; y++) {
    memcpy(dbytes + dpitch * y, sbytes + spitch * y, row_bytes);
  }
}
/* 16bpp -> 32bpp copy with an explicit RGB component ordering (rgb_pos). */
void libaroma_blt_align_to32_pos(dwordp __restrict dst, wordp __restrict src,
int w, int h, int dst_stride, int src_stride,
bytep rgb_pos) {
  int dpitch = w + (dst_stride >> 2);  /* stride bytes -> 32bpp pixels */
  int spitch = w + (src_stride >> 1);  /* stride bytes -> 16bpp pixels */
  int y;
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
  for (y = 0; y < h; y++) {
    libaroma_color_copy32(dst + dpitch * y, src + spitch * y, w, rgb_pos);
  }
}
/* 32bpp -> 16bpp copy with an explicit RGB component ordering (rgb_pos). */
void libaroma_blt_align_to16_pos(wordp __restrict dst, dwordp __restrict src,
int w, int h, int dst_stride, int src_stride,
bytep __restrict rgb_pos) {
  int dpitch = w + (dst_stride >> 1);  /* stride bytes -> 16bpp pixels */
  int spitch = w + (src_stride >> 2);  /* stride bytes -> 32bpp pixels */
  int y;
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
  for (y = 0; y < h; y++) {
    libaroma_color_copy16(dst + dpitch * y, src + spitch * y, w, rgb_pos);
  }
}
/* 16bpp -> 24bpp (BGR byte order) copy, one row at a time. */
void libaroma_blt_align24(bytep __restrict dst, wordp __restrict src,
int w, int h, int dst_stride, int src_stride) {
  int dpitch = (w * 3) + dst_stride;   /* 3 bytes per 24bpp pixel */
  int spitch = w + (src_stride >> 1);  /* stride bytes -> 16bpp pixels */
  int y;
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
  for (y = 0; y < h; y++) {
    libaroma_color_copy_bgr24(dst + dpitch * y, src + spitch * y, w);
  }
}
#endif /* __libaroma_alignblt_c__ */
|
omp_lock.c | #include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
omp_lock_t mylock;
/* Take the global lock, greet from the current thread, release the lock.
 * The format string carries the section's fixed ordinal; the only runtime
 * argument is the executing thread's id. */
static void locked_hello(const char *msg) {
  omp_set_lock(&mylock);
  sleep(1);
  printf(msg, omp_get_thread_num());
  omp_unset_lock(&mylock);
}

/* Demonstrate omp_lock_t: four sections run concurrently but the lock
 * serializes their greetings (one per second). */
int main()
{
  omp_init_lock(&mylock);
  #pragma omp parallel
  {
    #pragma omp sections
    {
      #pragma omp section
      locked_hello("[%d] 1. Hello world\n");
      #pragma omp section
      locked_hello("[%d] 2. Hello world\n");
      #pragma omp section
      locked_hello("[%d] 3. Hello world\n");
      #pragma omp section
      locked_hello("[%d] 4. Hello world\n");
    }
  }
  omp_destroy_lock(&mylock);
  return 0;
}
|
statistic.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% SSSSS TTTTT AAA TTTTT IIIII SSSSS TTTTT IIIII CCCC %
% SS T A A T I SS T I C %
% SSS T AAAAA T I SSS T I C %
% SS T A A T I SS T I C %
% SSSSS T A A T IIIII SSSSS T IIIII CCCC %
% %
% %
% MagickCore Image Statistical Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/animate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/compress.h"
#include "MagickCore/constitute.h"
#include "MagickCore/display.h"
#include "MagickCore/draw.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/list.h"
#include "MagickCore/image-private.h"
#include "MagickCore/magic.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/timer.h"
#include "MagickCore/utility.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E v a l u a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EvaluateImage() applies an arithmetic, relational, or logical operator,
% together with a value, to an image. Use these operations to lighten or
% darken an image, to increase or decrease contrast in an image, or to
% produce the "negative" of an image.
%
% The format of the EvaluateImage method is:
%
% MagickBooleanType EvaluateImage(Image *image,
% const MagickEvaluateOperator op,const double value,
% ExceptionInfo *exception)
% MagickBooleanType EvaluateImages(Image *images,
% const MagickEvaluateOperator op,const double value,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o op: A channel op.
%
% o value: the operand supplied to the evaluate operator.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* Per-pixel scratch record: one double accumulator per pixel channel,
   used by EvaluateImages() to combine the corresponding pixel of every
   image in the list before clamping back to Quantum. */
typedef struct _PixelChannels
{
double
channel[CompositePixelChannel];
} PixelChannels;
/*
  Release the per-thread PixelChannels rows allocated by
  AcquirePixelThreadSet().  Safe on a partially-populated set (NULL row
  entries are skipped).  Always returns NULL for caller reassignment.
*/
static PixelChannels **DestroyPixelThreadSet(const Image *images,
PixelChannels **pixels)
{
ssize_t
n;

size_t
count;

assert(pixels != (PixelChannels **) NULL);
/* must match the row count computed at acquisition time */
count=MagickMax(GetImageListLength(images),
(size_t) GetMagickResourceLimit(ThreadResource));
for (n=0; n < (ssize_t) count; n++)
{
if (pixels[n] == (PixelChannels *) NULL)
continue;
pixels[n]=(PixelChannels *) RelinquishMagickMemory(pixels[n]);
}
pixels=(PixelChannels **) RelinquishMagickMemory(pixels);
return(pixels);
}
static PixelChannels **AcquirePixelThreadSet(const Image *images)
{
const Image
*next;
PixelChannels
**pixels;
register ssize_t
i;
size_t
columns,
rows;
rows=MagickMax(GetImageListLength(images),
(size_t) GetMagickResourceLimit(ThreadResource));
pixels=(PixelChannels **) AcquireQuantumMemory(rows,sizeof(*pixels));
if (pixels == (PixelChannels **) NULL)
return((PixelChannels **) NULL);
(void) memset(pixels,0,rows*sizeof(*pixels));
columns=MagickMax(GetImageListLength(images),MaxPixelChannels);
for (next=images; next != (Image *) NULL; next=next->next)
columns=MagickMax(next->columns,columns);
for (i=0; i < (ssize_t) rows; i++)
{
register ssize_t
j;
pixels[i]=(PixelChannels *) AcquireQuantumMemory(columns,sizeof(**pixels));
if (pixels[i] == (PixelChannels *) NULL)
return(DestroyPixelThreadSet(images,pixels));
for (j=0; j < (ssize_t) columns; j++)
{
register ssize_t
k;
for (k=0; k < MaxPixelChannels; k++)
pixels[i][j].channel[k]=0.0;
}
}
return(pixels);
}
/* Return the larger of the two operands. */
static inline double EvaluateMax(const double x,const double y)
{
return(x > y ? x : y);
}
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
/*
  qsort comparator for PixelChannels records: orders by the signed sum of
  per-channel differences between the two pixels.
*/
static int IntensityCompare(const void *x,const void *y)
{
const PixelChannels
*a,
*b;

double
delta;

ssize_t
n;

a=(const PixelChannels *) x;
b=(const PixelChannels *) y;
delta=0.0;
for (n=0; n < MaxPixelChannels; n++)
delta+=a->channel[n]-(double) b->channel[n];
if (delta < 0)
return(-1);
return(delta > 0 ? 1 : 0);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
  Apply a single evaluate operator to one pixel sample and return the raw
  (unclamped) double result; callers clamp with ClampToQuantum().
  random_info feeds the *NoiseEvaluateOperator cases.  Note: Mean, Median,
  and Sum intentionally just accumulate (pixel+value) here — the caller
  performs the final division/selection (see EvaluateImage/EvaluateImages).
  UndefinedEvaluateOperator and unhandled guard conditions yield 0.0.
*/
static double ApplyEvaluateOperator(RandomInfo *random_info,const Quantum pixel,
const MagickEvaluateOperator op,const double value)
{
double
result;
result=0.0;
switch (op)
{
case UndefinedEvaluateOperator:
break;
case AbsEvaluateOperator:
{
result=(double) fabs((double) (pixel+value));
break;
}
case AddEvaluateOperator:
{
result=(double) (pixel+value);
break;
}
case AddModulusEvaluateOperator:
{
/*
This returns a 'floored modulus' of the addition which is a positive
result. It differs from % or fmod() that returns a 'truncated modulus'
result, where floor() is replaced by trunc() and could return a
negative result (which is clipped).
*/
result=pixel+value;
result-=(QuantumRange+1.0)*floor((double) result/(QuantumRange+1.0));
break;
}
case AndEvaluateOperator:
{
/* bitwise ops round value to the nearest integer via +0.5 */
result=(double) ((size_t) pixel & (size_t) (value+0.5));
break;
}
case CosineEvaluateOperator:
{
result=(double) (QuantumRange*(0.5*cos((double) (2.0*MagickPI*
QuantumScale*pixel*value))+0.5));
break;
}
case DivideEvaluateOperator:
{
/* divide-by-zero is treated as division by 1.0 */
result=pixel/(value == 0.0 ? 1.0 : value);
break;
}
case ExponentialEvaluateOperator:
{
result=(double) (QuantumRange*exp((double) (value*QuantumScale*pixel)));
break;
}
case GaussianNoiseEvaluateOperator:
{
result=(double) GenerateDifferentialNoise(random_info,pixel,
GaussianNoise,value);
break;
}
case ImpulseNoiseEvaluateOperator:
{
result=(double) GenerateDifferentialNoise(random_info,pixel,ImpulseNoise,
value);
break;
}
case LaplacianNoiseEvaluateOperator:
{
result=(double) GenerateDifferentialNoise(random_info,pixel,
LaplacianNoise,value);
break;
}
case LeftShiftEvaluateOperator:
{
result=(double) ((size_t) pixel << (size_t) (value+0.5));
break;
}
case LogEvaluateOperator:
{
/* guard: result stays 0.0 for (near-)black pixels */
if ((QuantumScale*pixel) >= MagickEpsilon)
result=(double) (QuantumRange*log((double) (QuantumScale*value*pixel+
1.0))/log((double) (value+1.0)));
break;
}
case MaxEvaluateOperator:
{
result=(double) EvaluateMax((double) pixel,value);
break;
}
case MeanEvaluateOperator:
{
/* accumulate only; caller divides by the sample count */
result=(double) (pixel+value);
break;
}
case MedianEvaluateOperator:
{
/* accumulate only; caller selects the median after sorting */
result=(double) (pixel+value);
break;
}
case MinEvaluateOperator:
{
result=(double) MagickMin((double) pixel,value);
break;
}
case MultiplicativeNoiseEvaluateOperator:
{
result=(double) GenerateDifferentialNoise(random_info,pixel,
MultiplicativeGaussianNoise,value);
break;
}
case MultiplyEvaluateOperator:
{
result=(double) (value*pixel);
break;
}
case OrEvaluateOperator:
{
result=(double) ((size_t) pixel | (size_t) (value+0.5));
break;
}
case PoissonNoiseEvaluateOperator:
{
result=(double) GenerateDifferentialNoise(random_info,pixel,PoissonNoise,
value);
break;
}
case PowEvaluateOperator:
{
result=(double) (QuantumRange*pow((double) (QuantumScale*pixel),(double)
value));
break;
}
case RightShiftEvaluateOperator:
{
result=(double) ((size_t) pixel >> (size_t) (value+0.5));
break;
}
case RootMeanSquareEvaluateOperator:
{
/* accumulate squares only; caller applies sqrt(sum/count) */
result=(double) (pixel*pixel+value);
break;
}
case SetEvaluateOperator:
{
result=value;
break;
}
case SineEvaluateOperator:
{
result=(double) (QuantumRange*(0.5*sin((double) (2.0*MagickPI*
QuantumScale*pixel*value))+0.5));
break;
}
case SubtractEvaluateOperator:
{
result=(double) (pixel-value);
break;
}
case SumEvaluateOperator:
{
result=(double) (pixel+value);
break;
}
case ThresholdEvaluateOperator:
{
result=(double) (((double) pixel <= value) ? 0 : QuantumRange);
break;
}
case ThresholdBlackEvaluateOperator:
{
result=(double) (((double) pixel <= value) ? 0 : pixel);
break;
}
case ThresholdWhiteEvaluateOperator:
{
result=(double) (((double) pixel > value) ? QuantumRange : pixel);
break;
}
case UniformNoiseEvaluateOperator:
{
result=(double) GenerateDifferentialNoise(random_info,pixel,UniformNoise,
value);
break;
}
case XorEvaluateOperator:
{
result=(double) ((size_t) pixel ^ (size_t) (value+0.5));
break;
}
}
return(result);
}
/*
  Clone a canvas image for EvaluateImages(): the clone is based on the
  list member with the most channels, sized to the largest width and
  height found anywhere in the list.
*/
static Image *AcquireImageCanvas(const Image *images,ExceptionInfo *exception)
{
const Image
*best,
*cursor;

size_t
max_columns,
max_rows;

best=images;
max_columns=images->columns;
max_rows=images->rows;
for (cursor=images; cursor != (Image *) NULL; cursor=cursor->next)
{
if (cursor->number_channels > best->number_channels)
best=cursor;
if (cursor->columns > max_columns)
max_columns=cursor->columns;
if (cursor->rows > max_rows)
max_rows=cursor->rows;
}
return(CloneImage(best,max_columns,max_rows,MagickTrue,exception));
}
/*
  Combine all images in the list into one new canvas image, applying the
  evaluate operator across corresponding pixels.  Median gets a dedicated
  (per-pixel, sort-based) path; every other operator accumulates row by
  row and then applies an operator-specific finalization (mean divide,
  multiply rescale, RMS sqrt).  Returns NULL on failure.
*/
MagickExport Image *EvaluateImages(const Image *images,
const MagickEvaluateOperator op,ExceptionInfo *exception)
{
#define EvaluateImageTag "Evaluate/Image"
CacheView
*evaluate_view;
Image
*image;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelChannels
**magick_restrict evaluate_pixels;
RandomInfo
**magick_restrict random_info;
size_t
number_images;
ssize_t
y;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
unsigned long
key;
#endif
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
/* canvas: most channels, largest extent of any image in the list */
image=AcquireImageCanvas(images,exception);
if (image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
{
image=DestroyImage(image);
return((Image *) NULL);
}
number_images=GetImageListLength(images);
evaluate_pixels=AcquirePixelThreadSet(images);
if (evaluate_pixels == (PixelChannels **) NULL)
{
image=DestroyImage(image);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
return((Image *) NULL);
}
/*
Evaluate image pixels.
*/
status=MagickTrue;
progress=0;
random_info=AcquireRandomInfoThreadSet();
evaluate_view=AcquireAuthenticCacheView(image,exception);
if (op == MedianEvaluateOperator)
{
/* median path: gather one sample per image for each pixel, sort by
intensity, and keep the middle record */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
key=GetRandomSecretKey(random_info[0]);
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,images,image->rows,key == ~0UL)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
CacheView
*image_view;
const Image
*next;
const int
id = GetOpenMPThreadId();
register PixelChannels
*evaluate_pixel;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
evaluate_pixel=evaluate_pixels[id];
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
j,
k;
for (j=0; j < (ssize_t) number_images; j++)
for (k=0; k < MaxPixelChannels; k++)
evaluate_pixel[j].channel[k]=0.0;
next=images;
for (j=0; j < (ssize_t) number_images; j++)
{
register const Quantum
*p;
register ssize_t
i;
image_view=AcquireVirtualCacheView(next,exception);
p=GetCacheViewVirtualPixels(image_view,x,y,1,1,exception);
if (p == (const Quantum *) NULL)
{
image_view=DestroyCacheView(image_view);
break;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(next,channel);
PixelTrait evaluate_traits = GetPixelChannelTraits(image,channel);
if ((traits == UndefinedPixelTrait) ||
(evaluate_traits == UndefinedPixelTrait))
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
evaluate_pixel[j].channel[i]=ApplyEvaluateOperator(
random_info[id],GetPixelChannel(next,channel,p),op,
evaluate_pixel[j].channel[i]);
}
image_view=DestroyCacheView(image_view);
next=GetNextImageInList(next);
}
/* after the loop j == number_images (barring an early break), so
evaluate_pixel[j/2] is the median record of the sorted samples */
qsort((void *) evaluate_pixel,number_images,sizeof(*evaluate_pixel),
IntensityCompare);
for (k=0; k < (ssize_t) GetPixelChannels(image); k++)
q[k]=ClampToQuantum(evaluate_pixel[j/2].channel[k]);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse)
status=MagickFalse;
if (images->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(images,EvaluateImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
}
else
{
/* accumulate path: fold every image's row into per-column accumulators
(the first image is always folded with Add), then finalize per op */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
key=GetRandomSecretKey(random_info[0]);
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,images,image->rows,key == ~0UL)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
CacheView
*image_view;
const Image
*next;
const int
id = GetOpenMPThreadId();
register ssize_t
i,
x;
register PixelChannels
*evaluate_pixel;
register Quantum
*magick_restrict q;
ssize_t
j;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
evaluate_pixel=evaluate_pixels[id];
for (j=0; j < (ssize_t) image->columns; j++)
for (i=0; i < MaxPixelChannels; i++)
evaluate_pixel[j].channel[i]=0.0;
next=images;
for (j=0; j < (ssize_t) number_images; j++)
{
register const Quantum
*p;
image_view=AcquireVirtualCacheView(next,exception);
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,
exception);
if (p == (const Quantum *) NULL)
{
image_view=DestroyCacheView(image_view);
break;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(next); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(next,channel);
PixelTrait evaluate_traits = GetPixelChannelTraits(image,channel);
if ((traits == UndefinedPixelTrait) ||
(evaluate_traits == UndefinedPixelTrait))
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
evaluate_pixel[x].channel[i]=ApplyEvaluateOperator(
random_info[id],GetPixelChannel(next,channel,p),j == 0 ?
AddEvaluateOperator : op,evaluate_pixel[x].channel[i]);
}
p+=GetPixelChannels(next);
}
image_view=DestroyCacheView(image_view);
next=GetNextImageInList(next);
}
/* operator-specific finalization of the accumulators */
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
switch (op)
{
case MeanEvaluateOperator:
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
evaluate_pixel[x].channel[i]/=(double) number_images;
break;
}
case MultiplyEvaluateOperator:
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
register ssize_t
j;
for (j=0; j < (ssize_t) (number_images-1); j++)
evaluate_pixel[x].channel[i]*=QuantumScale;
}
break;
}
case RootMeanSquareEvaluateOperator:
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
evaluate_pixel[x].channel[i]=sqrt(evaluate_pixel[x].channel[i]/
number_images);
break;
}
default:
break;
}
}
/* clamp the finalized accumulators into the destination row */
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]=ClampToQuantum(evaluate_pixel[x].channel[i]);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse)
status=MagickFalse;
if (images->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(images,EvaluateImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
}
evaluate_view=DestroyCacheView(evaluate_view);
evaluate_pixels=DestroyPixelThreadSet(images,evaluate_pixels);
random_info=DestroyRandomInfoThreadSet(random_info);
if (status == MagickFalse)
image=DestroyImage(image);
return(image);
}
/*
  Apply the evaluate operator with the given value to every updatable
  channel of every pixel of the image, in place.  Rows are processed in
  parallel; results are clamped to the Quantum range.  Returns MagickTrue
  on success.
*/
MagickExport MagickBooleanType EvaluateImage(Image *image,
const MagickEvaluateOperator op,const double value,ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
RandomInfo
**magick_restrict random_info;
ssize_t
y;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
unsigned long
key;
#endif
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
random_info=AcquireRandomInfoThreadSet();
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
/* serialize when the RNG is seeded (reproducible noise) — see the
key == ~0UL condition in the threading clause */
key=GetRandomSecretKey(random_info[0]);
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,key == ~0UL)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
result;
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & CopyPixelTrait) != 0)
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
result=ApplyEvaluateOperator(random_info[id],q[i],op,value);
/* Mean over a single image averages the pixel with the value */
if (op == MeanEvaluateOperator)
result/=2.0;
q[i]=ClampToQuantum(result);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,EvaluateImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
random_info=DestroyRandomInfoThreadSet(random_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F u n c t i o n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FunctionImage() applies a function (polynomial, sinusoid, arcsin, or
% arctan), evaluated with the given parameters, to each pixel of an image.
% Use these operations to lighten or darken an image, to increase or
% decrease contrast in an image, or to produce the "negative" of an image.
%
% The format of the FunctionImage method is:
%
% MagickBooleanType FunctionImage(Image *image,
% const MagickFunction function,const ssize_t number_parameters,
% const double *parameters,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o function: A channel function.
%
% o parameters: one or more parameters.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Evaluate one of the MagickFunction curves for a single pixel sample and
  return the clamped result.  Missing parameters fall back to per-function
  defaults; `exception` is currently unused.
*/
static Quantum ApplyFunction(Quantum pixel,const MagickFunction function,
const size_t number_parameters,const double *parameters,
ExceptionInfo *exception)
{
double
result;
register ssize_t
i;
(void) exception;
result=0.0;
switch (function)
{
case PolynomialFunction:
{
/*
Polynomial: polynomial constants, highest to lowest order (e.g. c0*x^3+
c1*x^2+c2*x+c3), evaluated by Horner's method on the normalized pixel.
*/
result=0.0;
for (i=0; i < (ssize_t) number_parameters; i++)
result=result*QuantumScale*pixel+parameters[i];
result*=QuantumRange;
break;
}
case SinusoidFunction:
{
double
amplitude,
bias,
frequency,
phase;
/*
Sinusoid: frequency, phase, amplitude, bias.
*/
frequency=(number_parameters >= 1) ? parameters[0] : 1.0;
phase=(number_parameters >= 2) ? parameters[1] : 0.0;
amplitude=(number_parameters >= 3) ? parameters[2] : 0.5;
bias=(number_parameters >= 4) ? parameters[3] : 0.5;
result=(double) (QuantumRange*(amplitude*sin((double) (2.0*
MagickPI*(frequency*QuantumScale*pixel+phase/360.0)))+bias));
break;
}
case ArcsinFunction:
{
double
bias,
center,
range,
width;
/*
Arcsin (pegged at range limits for invalid results): width, center,
range, and bias.
*/
width=(number_parameters >= 1) ? parameters[0] : 1.0;
center=(number_parameters >= 2) ? parameters[1] : 0.5;
range=(number_parameters >= 3) ? parameters[2] : 1.0;
bias=(number_parameters >= 4) ? parameters[3] : 0.5;
result=2.0/width*(QuantumScale*pixel-center);
if ( result <= -1.0 )
result=bias-range/2.0;
else
if (result >= 1.0)
result=bias+range/2.0;
else
result=(double) (range/MagickPI*asin((double) result)+bias);
result*=QuantumRange;
break;
}
case ArctanFunction:
{
double
center,
bias,
range,
slope;
/*
Arctan: slope, center, range, and bias.
*/
slope=(number_parameters >= 1) ? parameters[0] : 1.0;
center=(number_parameters >= 2) ? parameters[1] : 0.5;
range=(number_parameters >= 3) ? parameters[2] : 1.0;
bias=(number_parameters >= 4) ? parameters[3] : 0.5;
result=(double) (MagickPI*slope*(QuantumScale*pixel-center));
result=(double) (QuantumRange*(range/MagickPI*atan((double)
result)+bias));
break;
}
case UndefinedFunction:
break;
}
return(ClampToQuantum(result));
}
/*
  Apply a MagickFunction (polynomial, sinusoid, arcsin, arctan) with the
  given parameters to every updatable channel of every pixel, in place.
  Delegates to the OpenCL accelerator when available; otherwise processes
  rows in parallel.  Returns MagickTrue on success.
*/
MagickExport MagickBooleanType FunctionImage(Image *image,
const MagickFunction function,const size_t number_parameters,
const double *parameters,ExceptionInfo *exception)
{
#define FunctionImageTag "Function/Image "
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/* try the GPU path first; fall through to the CPU loop if it declines */
if (AccelerateFunctionImage(image,function,number_parameters,parameters,
exception) != MagickFalse)
return(MagickTrue);
#endif
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]=ApplyFunction(q[i],function,number_parameters,parameters,
exception);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,FunctionImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e E n t r o p y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageEntropy() returns the entropy of one or more image channels.
%
% The format of the GetImageEntropy method is:
%
% MagickBooleanType GetImageEntropy(const Image *image,double *entropy,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o entropy: the average entropy of the selected channels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageEntropy() reports the average entropy of the selected image
  channels, as computed by GetImageStatistics().  Returns MagickFalse only
  when the statistics buffer could not be obtained.
*/
MagickExport MagickBooleanType GetImageEntropy(const Image *image,
  double *entropy,ExceptionInfo *exception)
{
  ChannelStatistics
    *statistics;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  statistics=GetImageStatistics(image,exception);
  if (statistics == (ChannelStatistics *) NULL)
    return(MagickFalse);
  /* The composite slot carries the cross-channel aggregate. */
  *entropy=statistics[CompositePixelChannel].entropy;
  statistics=(ChannelStatistics *) RelinquishMagickMemory(statistics);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e E x t r e m a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageExtrema() returns the extrema of one or more image channels.
%
% The format of the GetImageExtrema method is:
%
% MagickBooleanType GetImageExtrema(const Image *image,size_t *minima,
% size_t *maxima,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o minima: the minimum value in the channel.
%
% o maxima: the maximum value in the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageExtrema() returns the minimum and maximum channel values of the
  image, rounded to the nearest integral quantum.  The status of the
  underlying GetImageRange() call is passed through to the caller.
*/
MagickExport MagickBooleanType GetImageExtrema(const Image *image,
  size_t *minima,size_t *maxima,ExceptionInfo *exception)
{
  double
    lower,
    upper;

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=GetImageRange(image,&lower,&upper,exception);
  /* Round to nearest: ceil(x-0.5) for the floor side, floor(x+0.5) above. */
  *minima=(size_t) ceil(lower-0.5);
  *maxima=(size_t) floor(upper+0.5);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e K u r t o s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageKurtosis() returns the kurtosis and skewness of one or more image
% channels.
%
% The format of the GetImageKurtosis method is:
%
% MagickBooleanType GetImageKurtosis(const Image *image,double *kurtosis,
% double *skewness,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o kurtosis: the kurtosis of the channel.
%
% o skewness: the skewness of the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageKurtosis() reports the kurtosis and skewness of the selected image
  channels, as computed by GetImageStatistics().  Returns MagickFalse only
  when the statistics buffer could not be obtained.
*/
MagickExport MagickBooleanType GetImageKurtosis(const Image *image,
  double *kurtosis,double *skewness,ExceptionInfo *exception)
{
  ChannelStatistics
    *statistics;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  statistics=GetImageStatistics(image,exception);
  if (statistics == (ChannelStatistics *) NULL)
    return(MagickFalse);
  /* The composite slot carries the cross-channel aggregate. */
  *kurtosis=statistics[CompositePixelChannel].kurtosis;
  *skewness=statistics[CompositePixelChannel].skewness;
  statistics=(ChannelStatistics *) RelinquishMagickMemory(statistics);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e M e a n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageMean() returns the mean and standard deviation of one or more image
% channels.
%
% The format of the GetImageMean method is:
%
% MagickBooleanType GetImageMean(const Image *image,double *mean,
% double *standard_deviation,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o mean: the average value in the channel.
%
% o standard_deviation: the standard deviation of the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageMean() reports the mean and standard deviation of the selected
  image channels, as computed by GetImageStatistics().  Returns MagickFalse
  only when the statistics buffer could not be obtained.
*/
MagickExport MagickBooleanType GetImageMean(const Image *image,double *mean,
  double *standard_deviation,ExceptionInfo *exception)
{
  ChannelStatistics
    *statistics;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  statistics=GetImageStatistics(image,exception);
  if (statistics == (ChannelStatistics *) NULL)
    return(MagickFalse);
  /* The composite slot carries the cross-channel aggregate. */
  *mean=statistics[CompositePixelChannel].mean;
  *standard_deviation=statistics[CompositePixelChannel].standard_deviation;
  statistics=(ChannelStatistics *) RelinquishMagickMemory(statistics);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e M o m e n t s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageMoments() returns the normalized moments of one or more image
% channels.
%
% The format of the GetImageMoments method is:
%
% ChannelMoments *GetImageMoments(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static size_t GetImageChannels(const Image *image)
{
register ssize_t
i;
size_t
channels;
channels=0;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
channels++;
}
return((size_t) (channels == 0 ? 1 : channels));
}
/*
  GetImageMoments() computes, for each updatable channel (and the composite
  aggregate in slot MaxPixelChannels), the centroid, the elliptical axes,
  angle, eccentricity and intensity, and the eight Hu invariant moments.

  Returns a buffer of MaxPixelChannels+1 ChannelMoments entries to be freed
  with RelinquishMagickMemory(), or NULL on allocation failure or when a
  pixel row could not be read.
*/
MagickExport ChannelMoments *GetImageMoments(const Image *image,
  ExceptionInfo *exception)
{
#define MaxNumberImageMoments 8

  CacheView
    *image_view;

  ChannelMoments
    *channel_moments;

  double
    channels,
    M00[MaxPixelChannels+1],
    M01[MaxPixelChannels+1],
    M02[MaxPixelChannels+1],
    M03[MaxPixelChannels+1],
    M10[MaxPixelChannels+1],
    M11[MaxPixelChannels+1],
    M12[MaxPixelChannels+1],
    M20[MaxPixelChannels+1],
    M21[MaxPixelChannels+1],
    M22[MaxPixelChannels+1],
    M30[MaxPixelChannels+1];

  PointInfo
    centroid[MaxPixelChannels+1];

  ssize_t
    channel,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  channel_moments=(ChannelMoments *) AcquireQuantumMemory(MaxPixelChannels+1,
    sizeof(*channel_moments));
  if (channel_moments == (ChannelMoments *) NULL)
    return(channel_moments);
  (void) memset(channel_moments,0,(MaxPixelChannels+1)*
    sizeof(*channel_moments));
  (void) memset(centroid,0,sizeof(centroid));
  (void) memset(M00,0,sizeof(M00));
  (void) memset(M01,0,sizeof(M01));
  (void) memset(M02,0,sizeof(M02));
  (void) memset(M03,0,sizeof(M03));
  (void) memset(M10,0,sizeof(M10));
  (void) memset(M11,0,sizeof(M11));
  (void) memset(M12,0,sizeof(M12));
  (void) memset(M20,0,sizeof(M20));
  (void) memset(M21,0,sizeof(M21));
  (void) memset(M22,0,sizeof(M22));
  (void) memset(M30,0,sizeof(M30));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    /*
      First pass: accumulate raw moments M00/M10/M01 for the centroid.
    */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        M00[channel]+=QuantumScale*p[i];
        M00[MaxPixelChannels]+=QuantumScale*p[i];
        M10[channel]+=x*QuantumScale*p[i];
        M10[MaxPixelChannels]+=x*QuantumScale*p[i];
        M01[channel]+=y*QuantumScale*p[i];
        M01[MaxPixelChannels]+=y*QuantumScale*p[i];
      }
      p+=GetPixelChannels(image);
    }
  }
  for (channel=0; channel <= MaxPixelChannels; channel++)
  {
    /*
      Compute center of mass (centroid); a near-zero mass falls back to the
      geometric image center.
    */
    if (M00[channel] < MagickEpsilon)
      {
        M00[channel]+=MagickEpsilon;
        centroid[channel].x=(double) image->columns/2.0;
        centroid[channel].y=(double) image->rows/2.0;
        continue;
      }
    M00[channel]+=MagickEpsilon;
    centroid[channel].x=M10[channel]/M00[channel];
    centroid[channel].y=M01[channel]/M00[channel];
  }
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    /*
      Second pass: accumulate the central moments about the centroid.
    */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        M11[channel]+=(x-centroid[channel].x)*(y-centroid[channel].y)*
          QuantumScale*p[i];
        M11[MaxPixelChannels]+=(x-centroid[channel].x)*(y-centroid[channel].y)*
          QuantumScale*p[i];
        M20[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
          QuantumScale*p[i];
        M20[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
          QuantumScale*p[i];
        M02[channel]+=(y-centroid[channel].y)*(y-centroid[channel].y)*
          QuantumScale*p[i];
        M02[MaxPixelChannels]+=(y-centroid[channel].y)*(y-centroid[channel].y)*
          QuantumScale*p[i];
        M21[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
          (y-centroid[channel].y)*QuantumScale*p[i];
        M21[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
          (y-centroid[channel].y)*QuantumScale*p[i];
        M12[channel]+=(x-centroid[channel].x)*(y-centroid[channel].y)*
          (y-centroid[channel].y)*QuantumScale*p[i];
        M12[MaxPixelChannels]+=(x-centroid[channel].x)*(y-centroid[channel].y)*
          (y-centroid[channel].y)*QuantumScale*p[i];
        M22[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
          (y-centroid[channel].y)*(y-centroid[channel].y)*QuantumScale*p[i];
        M22[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
          (y-centroid[channel].y)*(y-centroid[channel].y)*QuantumScale*p[i];
        M30[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
          (x-centroid[channel].x)*QuantumScale*p[i];
        M30[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
          (x-centroid[channel].x)*QuantumScale*p[i];
        M03[channel]+=(y-centroid[channel].y)*(y-centroid[channel].y)*
          (y-centroid[channel].y)*QuantumScale*p[i];
        M03[MaxPixelChannels]+=(y-centroid[channel].y)*(y-centroid[channel].y)*
          (y-centroid[channel].y)*QuantumScale*p[i];
      }
      p+=GetPixelChannels(image);
    }
  }
  /*
    Hoisted: GetImageChannels() walks the pixel-channel map on every call,
    so evaluate it once instead of once per moment normalization.
  */
  channels=(double) GetImageChannels(image);
  M00[MaxPixelChannels]/=channels;
  M01[MaxPixelChannels]/=channels;
  M02[MaxPixelChannels]/=channels;
  M03[MaxPixelChannels]/=channels;
  M10[MaxPixelChannels]/=channels;
  M11[MaxPixelChannels]/=channels;
  M12[MaxPixelChannels]/=channels;
  M20[MaxPixelChannels]/=channels;
  M21[MaxPixelChannels]/=channels;
  M22[MaxPixelChannels]/=channels;
  M30[MaxPixelChannels]/=channels;
  for (channel=0; channel <= MaxPixelChannels; channel++)
  {
    /*
      Compute elliptical angle, major and minor axes, eccentricity, & intensity.
    */
    channel_moments[channel].centroid=centroid[channel];
    channel_moments[channel].ellipse_axis.x=sqrt((2.0/M00[channel])*
      ((M20[channel]+M02[channel])+sqrt(4.0*M11[channel]*M11[channel]+
      (M20[channel]-M02[channel])*(M20[channel]-M02[channel]))));
    channel_moments[channel].ellipse_axis.y=sqrt((2.0/M00[channel])*
      ((M20[channel]+M02[channel])-sqrt(4.0*M11[channel]*M11[channel]+
      (M20[channel]-M02[channel])*(M20[channel]-M02[channel]))));
    channel_moments[channel].ellipse_angle=RadiansToDegrees(0.5*atan(2.0*
      M11[channel]/(M20[channel]-M02[channel]+MagickEpsilon)));
    /* Disambiguate the atan() quadrant from the signs of M11 and M20-M02. */
    if (fabs(M11[channel]) < MagickEpsilon)
      {
        if (fabs(M20[channel]-M02[channel]) < MagickEpsilon)
          channel_moments[channel].ellipse_angle+=0.0;
        else
          if ((M20[channel]-M02[channel]) < 0.0)
            channel_moments[channel].ellipse_angle+=90.0;
          else
            channel_moments[channel].ellipse_angle+=0.0;
      }
    else
      if (M11[channel] < 0.0)
        {
          if (fabs(M20[channel]-M02[channel]) < MagickEpsilon)
            channel_moments[channel].ellipse_angle+=0.0;
          else
            if ((M20[channel]-M02[channel]) < 0.0)
              channel_moments[channel].ellipse_angle+=90.0;
            else
              channel_moments[channel].ellipse_angle+=180.0;
        }
      else
        {
          if (fabs(M20[channel]-M02[channel]) < MagickEpsilon)
            channel_moments[channel].ellipse_angle+=0.0;
          else
            if ((M20[channel]-M02[channel]) < 0.0)
              channel_moments[channel].ellipse_angle+=90.0;
            else
              channel_moments[channel].ellipse_angle+=0.0;
        }
    channel_moments[channel].ellipse_eccentricity=sqrt(1.0-(
      channel_moments[channel].ellipse_axis.y/
      (channel_moments[channel].ellipse_axis.x+MagickEpsilon)));
    channel_moments[channel].ellipse_intensity=M00[channel]/
      (MagickPI*channel_moments[channel].ellipse_axis.x*
      channel_moments[channel].ellipse_axis.y+MagickEpsilon);
  }
  for (channel=0; channel <= MaxPixelChannels; channel++)
  {
    /*
      Normalize image moments: eta_pq = mu_pq / M00^(1+(p+q)/2).
    */
    M10[channel]=0.0;
    M01[channel]=0.0;
    M11[channel]/=pow(M00[channel],1.0+(1.0+1.0)/2.0);
    M20[channel]/=pow(M00[channel],1.0+(2.0+0.0)/2.0);
    M02[channel]/=pow(M00[channel],1.0+(0.0+2.0)/2.0);
    M21[channel]/=pow(M00[channel],1.0+(2.0+1.0)/2.0);
    M12[channel]/=pow(M00[channel],1.0+(1.0+2.0)/2.0);
    M22[channel]/=pow(M00[channel],1.0+(2.0+2.0)/2.0);
    M30[channel]/=pow(M00[channel],1.0+(3.0+0.0)/2.0);
    M03[channel]/=pow(M00[channel],1.0+(0.0+3.0)/2.0);
    M00[channel]=1.0;
  }
  image_view=DestroyCacheView(image_view);
  for (channel=0; channel <= MaxPixelChannels; channel++)
  {
    /*
      Compute Hu invariant moments.
    */
    channel_moments[channel].invariant[0]=M20[channel]+M02[channel];
    channel_moments[channel].invariant[1]=(M20[channel]-M02[channel])*
      (M20[channel]-M02[channel])+4.0*M11[channel]*M11[channel];
    channel_moments[channel].invariant[2]=(M30[channel]-3.0*M12[channel])*
      (M30[channel]-3.0*M12[channel])+(3.0*M21[channel]-M03[channel])*
      (3.0*M21[channel]-M03[channel]);
    channel_moments[channel].invariant[3]=(M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])+(M21[channel]+M03[channel])*
      (M21[channel]+M03[channel]);
    channel_moments[channel].invariant[4]=(M30[channel]-3.0*M12[channel])*
      (M30[channel]+M12[channel])*((M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])-3.0*(M21[channel]+M03[channel])*
      (M21[channel]+M03[channel]))+(3.0*M21[channel]-M03[channel])*
      (M21[channel]+M03[channel])*(3.0*(M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])-(M21[channel]+M03[channel])*
      (M21[channel]+M03[channel]));
    channel_moments[channel].invariant[5]=(M20[channel]-M02[channel])*
      ((M30[channel]+M12[channel])*(M30[channel]+M12[channel])-
      (M21[channel]+M03[channel])*(M21[channel]+M03[channel]))+
      4.0*M11[channel]*(M30[channel]+M12[channel])*(M21[channel]+M03[channel]);
    channel_moments[channel].invariant[6]=(3.0*M21[channel]-M03[channel])*
      (M30[channel]+M12[channel])*((M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])-3.0*(M21[channel]+M03[channel])*
      (M21[channel]+M03[channel]))-(M30[channel]-3*M12[channel])*
      (M21[channel]+M03[channel])*(3.0*(M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])-(M21[channel]+M03[channel])*
      (M21[channel]+M03[channel]));
    channel_moments[channel].invariant[7]=M11[channel]*((M30[channel]+
      M12[channel])*(M30[channel]+M12[channel])-(M03[channel]+M21[channel])*
      (M03[channel]+M21[channel]))-(M20[channel]-M02[channel])*
      (M30[channel]+M12[channel])*(M03[channel]+M21[channel]);
  }
  /* If the second pixel pass broke early, the moments are incomplete. */
  if (y < (ssize_t) image->rows)
    channel_moments=(ChannelMoments *) RelinquishMagickMemory(channel_moments);
  return(channel_moments);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e P e r c e p t u a l H a s h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePerceptualHash() returns the perceptual hash of one or more
% image channels.
%
% The format of the GetImagePerceptualHash method is:
%
% ChannelPerceptualHash *GetImagePerceptualHash(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  MagickLog10() returns log10(|x|), with |x| clamped below by Log10Epsilon
  so the result is always finite (log10 of zero would be -infinity).
*/
static inline double MagickLog10(const double x)
{
#define Log10Epsilon (1.0e-11)

  double
    magnitude;

  magnitude=fabs(x);
  if (magnitude < Log10Epsilon)
    magnitude=Log10Epsilon;
  return(log10(magnitude));
}
/*
  GetImagePerceptualHash() computes, for each requested colorspace (from the
  "phash:colorspaces" artifact, defaulting to "sRGB,HCLp"), the negated
  log10 of the Hu invariant moments of a lightly-blurred copy of the image.
  Free the returned buffer with RelinquishMagickMemory().  Returns NULL on
  allocation failure.
*/
MagickExport ChannelPerceptualHash *GetImagePerceptualHash(const Image *image,
  ExceptionInfo *exception)
{
  ChannelPerceptualHash
    *perceptual_hash;

  char
    *colorspaces,
    *q;

  const char
    *artifact;

  MagickBooleanType
    status;

  register char
    *p;

  register ssize_t
    i;

  perceptual_hash=(ChannelPerceptualHash *) AcquireQuantumMemory(
    MaxPixelChannels+1UL,sizeof(*perceptual_hash));
  if (perceptual_hash == (ChannelPerceptualHash *) NULL)
    return((ChannelPerceptualHash *) NULL);
  /*
    Zero the buffer so phash slots are well-defined even when the loop below
    exits early (previously they were returned uninitialized).
  */
  (void) memset(perceptual_hash,0,(MaxPixelChannels+1)*
    sizeof(*perceptual_hash));
  artifact=GetImageArtifact(image,"phash:colorspaces");
  if (artifact != NULL)
    colorspaces=AcquireString(artifact);
  else
    colorspaces=AcquireString("sRGB,HCLp");
  perceptual_hash[0].number_colorspaces=0;
  perceptual_hash[0].number_channels=0;
  q=colorspaces;
  for (i=0; (p=StringToken(",",&q)) != (char *) NULL; i++)
  {
    ChannelMoments
      *moments;

    Image
      *hash_image;

    size_t
      j;

    ssize_t
      channel,
      colorspace;

    if (i >= MaximumNumberOfPerceptualColorspaces)
      break;
    colorspace=ParseCommandOption(MagickColorspaceOptions,MagickFalse,p);
    if (colorspace < 0)
      break;
    perceptual_hash[0].colorspace[i]=(ColorspaceType) colorspace;
    hash_image=BlurImage(image,0.0,1.0,exception);
    if (hash_image == (Image *) NULL)
      break;
    hash_image->depth=8;
    status=TransformImageColorspace(hash_image,(ColorspaceType) colorspace,
      exception);
    if (status == MagickFalse)
      {
        /* Fix: hash_image was previously leaked on colorspace failure. */
        hash_image=DestroyImage(hash_image);
        break;
      }
    moments=GetImageMoments(hash_image,exception);
    perceptual_hash[0].number_colorspaces++;
    perceptual_hash[0].number_channels+=GetImageChannels(hash_image);
    hash_image=DestroyImage(hash_image);
    if (moments == (ChannelMoments *) NULL)
      break;
    for (channel=0; channel <= MaxPixelChannels; channel++)
      for (j=0; j < MaximumNumberOfImageMoments; j++)
        perceptual_hash[channel].phash[i][j]=
          (-MagickLog10(moments[channel].invariant[j]));
    moments=(ChannelMoments *) RelinquishMagickMemory(moments);
  }
  colorspaces=DestroyString(colorspaces);
  return(perceptual_hash);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e R a n g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageRange() returns the range of one or more image channels.
%
% The format of the GetImageRange method is:
%
% MagickBooleanType GetImageRange(const Image *image,double *minima,
% double *maxima,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o minima: the minimum value in the channel.
%
% o maxima: the maximum value in the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageRange() computes the minimum and maximum sample value over every
  updatable channel of every pixel.  Rows are scanned in parallel when
  OpenMP is available: each row keeps its own extrema which are then merged
  into *minima/*maxima inside a critical section.

  Returns MagickFalse if any pixel row could not be read (in which case the
  outputs remain 0.0 or partially merged).
*/
MagickExport MagickBooleanType GetImageRange(const Image *image,double *minima,
  double *maxima,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    initialize,
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=MagickTrue;
  initialize=MagickTrue;  /* first merged row seeds the global extrema */
  *maxima=0.0;
  *minima=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status,initialize) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      row_maxima = 0.0,
      row_minima = 0.0;

    MagickBooleanType
      row_initialize;

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining work */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    row_initialize=MagickTrue;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;  /* channel not selected for update */
        if (row_initialize != MagickFalse)
          {
            /* Seed the row extrema from the first counted sample. */
            row_minima=(double) p[i];
            row_maxima=(double) p[i];
            row_initialize=MagickFalse;
          }
        else
          {
            if ((double) p[i] < row_minima)
              row_minima=(double) p[i];
            if ((double) p[i] > row_maxima)
              row_maxima=(double) p[i];
          }
      }
      p+=GetPixelChannels(image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetImageRange)
#endif
    {
      /* Merge this row's extrema into the shared result. */
      if (initialize != MagickFalse)
        {
          *minima=row_minima;
          *maxima=row_maxima;
          initialize=MagickFalse;
        }
      else
        {
          if (row_minima < *minima)
            *minima=row_minima;
          if (row_maxima > *maxima)
            *maxima=row_maxima;
        }
    }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e S t a t i s t i c s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageStatistics() returns statistics for each channel in the image. The
% statistics include the channel depth, its minima, maxima, mean, standard
% deviation, kurtosis and skewness. You can access the red channel mean, for
% example, like this:
%
% channel_statistics=GetImageStatistics(image,exception);
% red_mean=channel_statistics[RedPixelChannel].mean;
%
% Use MagickRelinquishMemory() to free the statistics buffer.
%
% The format of the GetImageStatistics method is:
%
% ChannelStatistics *GetImageStatistics(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageStatistics() gathers per-channel statistics: depth, minima, maxima,
  mean, standard deviation, entropy, kurtosis and skewness.  Index the
  returned buffer by PixelChannel; the cross-channel aggregate lives in
  [CompositePixelChannel].  Free the buffer with RelinquishMagickMemory().

  Returns NULL on allocation failure or if any pixel row could not be read.
*/
MagickExport ChannelStatistics *GetImageStatistics(const Image *image,
  ExceptionInfo *exception)
{
  ChannelStatistics
    *channel_statistics;

  double
    area,
    *histogram,
    standard_deviation;

  MagickStatusType
    status;

  QuantumAny
    range;

  register ssize_t
    i;

  size_t
    depth;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* One histogram of MaxMap+1 bins per pixel channel (used for entropy). */
  histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,GetPixelChannels(image)*
    sizeof(*histogram));
  channel_statistics=(ChannelStatistics *) AcquireQuantumMemory(
    MaxPixelChannels+1,sizeof(*channel_statistics));
  if ((channel_statistics == (ChannelStatistics *) NULL) ||
      (histogram == (double *) NULL))
    {
      if (histogram != (double *) NULL)
        histogram=(double *) RelinquishMagickMemory(histogram);
      if (channel_statistics != (ChannelStatistics *) NULL)
        channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
          channel_statistics);
      return(channel_statistics);
    }
  (void) memset(channel_statistics,0,(MaxPixelChannels+1)*
    sizeof(*channel_statistics));
  for (i=0; i <= (ssize_t) MaxPixelChannels; i++)
  {
    /* Seed extrema so the first sample always replaces them. */
    channel_statistics[i].depth=1;
    channel_statistics[i].maxima=(-MagickMaximumValue);
    channel_statistics[i].minima=MagickMaximumValue;
  }
  (void) memset(histogram,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*histogram));
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    /*
      Compute pixel statistics.
    */
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      if (GetPixelReadMask(image,p) <= (QuantumRange/2))
        {
          /* Masked pixel: excluded from all statistics. */
          p+=GetPixelChannels(image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;  /* channel not selected for update */
        if (channel_statistics[channel].depth != MAGICKCORE_QUANTUM_DEPTH)
          {
            /*
              Grow the channel depth until this sample round-trips through
              it losslessly; i-- re-tests the same sample after each bump.
            */
            depth=channel_statistics[channel].depth;
            range=GetQuantumRange(depth);
            status=p[i] != ScaleAnyToQuantum(ScaleQuantumToAny(p[i],range),
              range) ? MagickTrue : MagickFalse;
            if (status != MagickFalse)
              {
                channel_statistics[channel].depth++;
                i--;
                continue;
              }
          }
        if ((double) p[i] < channel_statistics[channel].minima)
          channel_statistics[channel].minima=(double) p[i];
        if ((double) p[i] > channel_statistics[channel].maxima)
          channel_statistics[channel].maxima=(double) p[i];
        /* Accumulate raw power sums; normalized to means further below. */
        channel_statistics[channel].sum+=p[i];
        channel_statistics[channel].sum_squared+=(double) p[i]*p[i];
        channel_statistics[channel].sum_cubed+=(double) p[i]*p[i]*p[i];
        channel_statistics[channel].sum_fourth_power+=(double) p[i]*p[i]*p[i]*
          p[i];
        channel_statistics[channel].area++;
        if ((double) p[i] < channel_statistics[CompositePixelChannel].minima)
          channel_statistics[CompositePixelChannel].minima=(double) p[i];
        if ((double) p[i] > channel_statistics[CompositePixelChannel].maxima)
          channel_statistics[CompositePixelChannel].maxima=(double) p[i];
        histogram[GetPixelChannels(image)*ScaleQuantumToMap(
          ClampToQuantum((double) p[i]))+i]++;
        channel_statistics[CompositePixelChannel].sum+=(double) p[i];
        channel_statistics[CompositePixelChannel].sum_squared+=(double)
          p[i]*p[i];
        channel_statistics[CompositePixelChannel].sum_cubed+=(double)
          p[i]*p[i]*p[i];
        channel_statistics[CompositePixelChannel].sum_fourth_power+=(double)
          p[i]*p[i]*p[i]*p[i];
        channel_statistics[CompositePixelChannel].area++;
      }
      p+=GetPixelChannels(image);
    }
  }
  for (i=0; i <= (ssize_t) MaxPixelChannels; i++)
  {
    /*
      Normalize pixel statistics: turn power sums into means, then derive
      the (sample) standard deviation.
    */
    area=PerceptibleReciprocal(channel_statistics[i].area);
    channel_statistics[i].sum*=area;
    channel_statistics[i].sum_squared*=area;
    channel_statistics[i].sum_cubed*=area;
    channel_statistics[i].sum_fourth_power*=area;
    channel_statistics[i].mean=channel_statistics[i].sum;
    channel_statistics[i].variance=channel_statistics[i].sum_squared;
    standard_deviation=sqrt(channel_statistics[i].variance-
      (channel_statistics[i].mean*channel_statistics[i].mean));
    /* Bessel-style correction: scale by area/(area-1). */
    standard_deviation=sqrt(PerceptibleReciprocal(channel_statistics[i].area-
      1.0)*channel_statistics[i].area*standard_deviation*standard_deviation);
    channel_statistics[i].standard_deviation=standard_deviation;
  }
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    double
      number_bins;

    register ssize_t
      j;

    /*
      Compute pixel entropy, normalized by log10 of the occupied bin count.
    */
    PixelChannel channel = GetPixelChannelChannel(image,i);
    number_bins=0.0;
    for (j=0; j <= (ssize_t) MaxMap; j++)
      if (histogram[GetPixelChannels(image)*j+i] > 0.0)
        number_bins++;
    area=PerceptibleReciprocal(channel_statistics[channel].area);
    for (j=0; j <= (ssize_t) MaxMap; j++)
    {
      double
        count;

      count=area*histogram[GetPixelChannels(image)*j+i];
      channel_statistics[channel].entropy+=-count*MagickLog10(count)*
        PerceptibleReciprocal(MagickLog10(number_bins));
      channel_statistics[CompositePixelChannel].entropy+=-count*
        MagickLog10(count)*PerceptibleReciprocal(MagickLog10(number_bins))/
        GetPixelChannels(image);
    }
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  for (i=0; i <= (ssize_t) MaxPixelChannels; i++)
  {
    /*
      Compute kurtosis & skewness statistics from the standardized moments.
    */
    standard_deviation=PerceptibleReciprocal(
      channel_statistics[i].standard_deviation);
    channel_statistics[i].skewness=(channel_statistics[i].sum_cubed-3.0*
      channel_statistics[i].mean*channel_statistics[i].sum_squared+2.0*
      channel_statistics[i].mean*channel_statistics[i].mean*
      channel_statistics[i].mean)*(standard_deviation*standard_deviation*
      standard_deviation);
    channel_statistics[i].kurtosis=(channel_statistics[i].sum_fourth_power-4.0*
      channel_statistics[i].mean*channel_statistics[i].sum_cubed+6.0*
      channel_statistics[i].mean*channel_statistics[i].mean*
      channel_statistics[i].sum_squared-3.0*channel_statistics[i].mean*
      channel_statistics[i].mean*1.0*channel_statistics[i].mean*
      channel_statistics[i].mean)*(standard_deviation*standard_deviation*
      standard_deviation*standard_deviation)-3.0;
  }
  /* Rebuild the composite slot as the average over the counted channels. */
  channel_statistics[CompositePixelChannel].mean=0.0;
  channel_statistics[CompositePixelChannel].standard_deviation=0.0;
  channel_statistics[CompositePixelChannel].entropy=0.0;
  for (i=0; i < (ssize_t) MaxPixelChannels; i++)
  {
    channel_statistics[CompositePixelChannel].mean+=
      channel_statistics[i].mean;
    channel_statistics[CompositePixelChannel].standard_deviation+=
      channel_statistics[i].standard_deviation;
    channel_statistics[CompositePixelChannel].entropy+=
      channel_statistics[i].entropy;
  }
  channel_statistics[CompositePixelChannel].mean/=(double)
    GetImageChannels(image);
  channel_statistics[CompositePixelChannel].standard_deviation/=(double)
    GetImageChannels(image);
  channel_statistics[CompositePixelChannel].entropy/=(double)
    GetImageChannels(image);
  /* If the pixel loop broke early, the statistics are incomplete. */
  if (y < (ssize_t) image->rows)
    channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
      channel_statistics);
  return(channel_statistics);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o l y n o m i a l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PolynomialImage() returns a new image where each pixel is the sum of the
% pixels in the image sequence after applying its corresponding terms
% (coefficient and degree pairs).
%
% The format of the PolynomialImage method is:
%
% Image *PolynomialImage(const Image *images,const size_t number_terms,
% const double *terms,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o number_terms: the number of terms in the list. The actual list length
% is 2 x number_terms + 1 (the constant).
%
% o terms: the list of polynomial coefficients and degree pairs and a
% constant.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  PolynomialImage(): for each pixel location, sum coefficient*pow(sample,degree)
  over the image sequence, one (coefficient,degree) pair per image, and clamp
  the result into a new canvas image.  Returns the new image or NULL on error.
*/
MagickExport Image *PolynomialImage(const Image *images,
  const size_t number_terms,const double *terms,ExceptionInfo *exception)
{
#define PolynomialImageTag "Polynomial/Image"

  CacheView
    *polynomial_view;

  Image
    *image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelChannels
    **magick_restrict polynomial_pixels;

  size_t
    number_images;

  ssize_t
    y;

  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    The output canvas must be DirectClass for per-channel writes.
  */
  image=AcquireImageCanvas(images,exception);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    {
      image=DestroyImage(image);
      return((Image *) NULL);
    }
  number_images=GetImageListLength(images);
  /*
    One scratch accumulator row per OpenMP worker thread.
  */
  polynomial_pixels=AcquirePixelThreadSet(images);
  if (polynomial_pixels == (PixelChannels **) NULL)
    {
      image=DestroyImage(image);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return((Image *) NULL);
    }
  /*
    Polynomial image pixels.
  */
  status=MagickTrue;
  progress=0;
  polynomial_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    CacheView
      *image_view;

    const Image
      *next;

    const int
      id = GetOpenMPThreadId();

    register ssize_t
      i,
      x;

    register PixelChannels
      *polynomial_pixel;

    register Quantum
      *magick_restrict q;

    ssize_t
      j;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(polynomial_view,0,y,image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /*
      Zero this thread's accumulator row before summing the sequence.
    */
    polynomial_pixel=polynomial_pixels[id];
    for (j=0; j < (ssize_t) image->columns; j++)
      for (i=0; i < MaxPixelChannels; i++)
        polynomial_pixel[j].channel[i]=0.0;
    next=images;
    for (j=0; j < (ssize_t) number_images; j++)
    {
      register const Quantum
        *p;

      /* Images beyond the supplied term list contribute nothing. */
      if (j >= (ssize_t) number_terms)
        continue;
      image_view=AcquireVirtualCacheView(next,exception);
      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const Quantum *) NULL)
        {
          image_view=DestroyCacheView(image_view);
          break;
        }
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        register ssize_t
          i;

        for (i=0; i < (ssize_t) GetPixelChannels(next); i++)
        {
          MagickRealType
            coefficient,
            degree;

          PixelChannel channel = GetPixelChannelChannel(image,i);
          PixelTrait traits = GetPixelChannelTraits(next,channel);
          PixelTrait polynomial_traits=GetPixelChannelTraits(image,channel);
          if ((traits == UndefinedPixelTrait) ||
              (polynomial_traits == UndefinedPixelTrait))
            continue;
          if ((traits & UpdatePixelTrait) == 0)
            continue;
          /* terms[2*j] and terms[(j<<1)+1] index the same pair list. */
          coefficient=(MagickRealType) terms[2*j];
          degree=(MagickRealType) terms[(j << 1)+1];
          /*
            NOTE(review): p walks next's pixel buffer (stride
            GetPixelChannels(next)) but is decoded with image's channel map
            here — verify the layouts agree when next's channel order
            differs from the canvas.
          */
          polynomial_pixel[x].channel[i]+=coefficient*
            pow(QuantumScale*GetPixelChannel(image,channel,p),degree);
        }
        p+=GetPixelChannels(next);
      }
      image_view=DestroyCacheView(image_view);
      next=GetNextImageInList(next);
    }
    /*
      Clamp the accumulated sums into the output row.
    */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumRange*polynomial_pixel[x].channel[i]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(polynomial_view,exception) == MagickFalse)
      status=MagickFalse;
    if (images->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(images,PolynomialImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  polynomial_view=DestroyCacheView(polynomial_view);
  polynomial_pixels=DestroyPixelThreadSet(images,polynomial_pixels);
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t a t i s t i c I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StatisticImage() makes each pixel the min / max / median / mode / etc. of
% the neighborhood of the specified width and height.
%
% The format of the StatisticImage method is:
%
% Image *StatisticImage(const Image *image,const StatisticType type,
% const size_t width,const size_t height,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: the statistic type (median, mode, etc.).
%
% o width: the width of the pixel neighborhood.
%
% o height: the height of the pixel neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Counting skip-list over 16-bit sample values: the node index IS the sample
  value, with index 65536 reserved for the list-head sentinel.  next[9] holds
  up to nine levels of forward links (see AddNodePixelList).
*/
typedef struct _SkipNode
{
  size_t
    next[9],
    count,      /* multiplicity of this sample value in the current window */
    signature;  /* matches PixelList.signature while the node is live */
} SkipNode;

typedef struct _SkipList
{
  ssize_t
    level;      /* highest level currently in use (0..8) */

  SkipNode
    *nodes;     /* 65537 nodes: one per sample value plus the sentinel */
} SkipList;

/*
  One neighborhood's worth of samples for a single worker thread.
*/
typedef struct _PixelList
{
  size_t
    length,     /* window size in samples (width*height) */
    seed;       /* PRNG state used to pick randomized node levels */

  SkipList
    skip_list;

  size_t
    signature;  /* generation counter; bumped by ResetPixelList() */
} PixelList;
/*
  Release a PixelList and its aligned skip-list node buffer.  NULL-safe;
  always returns NULL for convenient assignment back to the caller.
*/
static PixelList *DestroyPixelList(PixelList *pixel_list)
{
  SkipNode
    *nodes;

  if (pixel_list == (PixelList *) NULL)
    return((PixelList *) NULL);
  nodes=pixel_list->skip_list.nodes;
  if (nodes != (SkipNode *) NULL)
    pixel_list->skip_list.nodes=(SkipNode *) RelinquishAlignedMemory(nodes);
  return((PixelList *) RelinquishMagickMemory(pixel_list));
}
/*
  Tear down the per-thread PixelList array built by AcquirePixelListThreadSet,
  destroying each thread's list before releasing the array itself.
*/
static PixelList **DestroyPixelListThreadSet(PixelList **pixel_list)
{
  ssize_t
    thread;

  assert(pixel_list != (PixelList **) NULL);
  for (thread=0; thread < (ssize_t) GetMagickResourceLimit(ThreadResource); thread++)
  {
    if (pixel_list[thread] == (PixelList *) NULL)
      continue;
    pixel_list[thread]=DestroyPixelList(pixel_list[thread]);
  }
  return((PixelList **) RelinquishMagickMemory(pixel_list));
}
static PixelList *AcquirePixelList(const size_t width,const size_t height)
{
PixelList
*pixel_list;
pixel_list=(PixelList *) AcquireMagickMemory(sizeof(*pixel_list));
if (pixel_list == (PixelList *) NULL)
return(pixel_list);
(void) memset((void *) pixel_list,0,sizeof(*pixel_list));
pixel_list->length=width*height;
pixel_list->skip_list.nodes=(SkipNode *) AcquireAlignedMemory(65537UL,
sizeof(*pixel_list->skip_list.nodes));
if (pixel_list->skip_list.nodes == (SkipNode *) NULL)
return(DestroyPixelList(pixel_list));
(void) memset(pixel_list->skip_list.nodes,0,65537UL*
sizeof(*pixel_list->skip_list.nodes));
pixel_list->signature=MagickCoreSignature;
return(pixel_list);
}
static PixelList **AcquirePixelListThreadSet(const size_t width,
const size_t height)
{
PixelList
**pixel_list;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
pixel_list=(PixelList **) AcquireQuantumMemory(number_threads,
sizeof(*pixel_list));
if (pixel_list == (PixelList **) NULL)
return((PixelList **) NULL);
(void) memset(pixel_list,0,number_threads*sizeof(*pixel_list));
for (i=0; i < (ssize_t) number_threads; i++)
{
pixel_list[i]=AcquirePixelList(width,height);
if (pixel_list[i] == (PixelList *) NULL)
return(DestroyPixelListThreadSet(pixel_list));
}
return(pixel_list);
}
/*
  Insert sample value 'color' into the skip-list with count 1.  The caller
  (InsertPixelList) guarantees the node is not already live this generation.
*/
static void AddNodePixelList(PixelList *pixel_list,const size_t color)
{
  register SkipList
    *p;

  register ssize_t
    level;

  size_t
    search,
    update[9];

  /*
    Initialize the node.
  */
  p=(&pixel_list->skip_list);
  p->nodes[color].signature=pixel_list->signature;
  p->nodes[color].count=1;
  /*
    Determine where it belongs in the list.  Start at the sentinel (65536)
    and record, per level, the last node whose value precedes 'color'.
  */
  search=65536UL;
  for (level=p->level; level >= 0; level--)
  {
    while (p->nodes[search].next[level] < color)
      search=p->nodes[search].next[level];
    update[level]=search;
  }
  /*
    Generate a pseudo-random level for this node.
  */
  for (level=0; ; level++)
  {
    /* Linear-congruential step; the 0x300 mask gives a geometric level
       distribution. */
    pixel_list->seed=(pixel_list->seed*42893621L)+1L;
    if ((pixel_list->seed & 0x300) != 0x300)
      break;
  }
  if (level > 8)
    level=8;
  if (level > (p->level+2))
    level=p->level+2;
  /*
    If we're raising the list's level, link back to the root node.
  */
  while (level > p->level)
  {
    p->level++;
    update[p->level]=65536UL;
  }
  /*
    Link the node into the skip-list, from its top level down to level 0.
  */
  do
  {
    p->nodes[color].next[level]=p->nodes[update[level]].next[level];
    p->nodes[update[level]].next[level]=color;
  } while (level-- > 0);
}
/*
  Return (via *pixel) the largest sample value in the window: walk the
  level-0 links from the sentinel until every sample has been counted.
*/
static inline void GetMaximumPixelList(PixelList *pixel_list,Quantum *pixel)
{
  SkipList
    *list;

  size_t
    largest,
    value;

  ssize_t
    seen;

  list=(&pixel_list->skip_list);
  value=65536L;
  seen=0;
  largest=list->nodes[value].next[0];
  do
  {
    value=list->nodes[value].next[0];
    if (value > largest)
      largest=value;
    seen+=list->nodes[value].count;
  } while (seen < (ssize_t) pixel_list->length);
  *pixel=ScaleShortToQuantum((unsigned short) largest);
}
/*
  Return (via *pixel) the arithmetic mean of the samples in the window.
*/
static inline void GetMeanPixelList(PixelList *pixel_list,Quantum *pixel)
{
  double
    total;

  SkipList
    *list;

  size_t
    value;

  ssize_t
    seen;

  list=(&pixel_list->skip_list);
  value=65536L;
  seen=0;
  total=0.0;
  do
  {
    value=list->nodes[value].next[0];
    total+=(double) list->nodes[value].count*value;
    seen+=list->nodes[value].count;
  } while (seen < (ssize_t) pixel_list->length);
  total/=pixel_list->length;
  *pixel=ScaleShortToQuantum((unsigned short) total);
}
/*
  Return (via *pixel) the median sample: walk values in ascending order
  until just over half the samples have been seen.
*/
static inline void GetMedianPixelList(PixelList *pixel_list,Quantum *pixel)
{
  SkipList
    *list;

  size_t
    value;

  ssize_t
    seen;

  list=(&pixel_list->skip_list);
  value=65536L;
  seen=0;
  do
  {
    value=list->nodes[value].next[0];
    seen+=list->nodes[value].count;
  } while (seen <= (ssize_t) (pixel_list->length >> 1));
  *pixel=ScaleShortToQuantum((unsigned short) value);
}
/*
  Return (via *pixel) the smallest sample value in the window.
*/
static inline void GetMinimumPixelList(PixelList *pixel_list,Quantum *pixel)
{
  SkipList
    *list;

  size_t
    smallest,
    value;

  ssize_t
    seen;

  list=(&pixel_list->skip_list);
  seen=0;
  value=65536UL;
  smallest=list->nodes[value].next[0];
  do
  {
    value=list->nodes[value].next[0];
    if (value < smallest)
      smallest=value;
    seen+=list->nodes[value].count;
  } while (seen < (ssize_t) pixel_list->length);
  *pixel=ScaleShortToQuantum((unsigned short) smallest);
}
/*
  Return (via *pixel) the mode: the sample value with the highest
  multiplicity in the window.
*/
static inline void GetModePixelList(PixelList *pixel_list,Quantum *pixel)
{
  SkipList
    *list;

  size_t
    best,
    best_count,
    value;

  ssize_t
    seen;

  list=(&pixel_list->skip_list);
  value=65536L;
  best=value;
  best_count=list->nodes[best].count;
  seen=0;
  do
  {
    value=list->nodes[value].next[0];
    if (list->nodes[value].count > best_count)
      {
        best=value;
        best_count=list->nodes[best].count;
      }
    seen+=list->nodes[value].count;
  } while (seen < (ssize_t) pixel_list->length);
  *pixel=ScaleShortToQuantum((unsigned short) best);
}
/*
  Return (via *pixel) the "nonpeak" value: the median, unless the median sits
  at an end of the value list, in which case its interior neighbor is used.
*/
static inline void GetNonpeakPixelList(PixelList *pixel_list,Quantum *pixel)
{
  register SkipList
    *p;

  size_t
    color,
    next,
    previous;

  ssize_t
    count;

  /*
    Finds the non peak value for each of the colors.
  */
  p=(&pixel_list->skip_list);
  color=65536L;
  next=p->nodes[color].next[0];
  count=0;
  /* Walk to the median, tracking its predecessor and successor. */
  do
  {
    previous=color;
    color=next;
    next=p->nodes[color].next[0];
    count+=p->nodes[color].count;
  } while (count <= (ssize_t) (pixel_list->length >> 1));
  /* 65536 is the sentinel: prefer a neighbor that is a real value when the
     median is the first or last entry. */
  if ((previous == 65536UL) && (next != 65536UL))
    color=next;
  else
    if ((previous != 65536UL) && (next == 65536UL))
      color=previous;
  *pixel=ScaleShortToQuantum((unsigned short) color);
}
/*
  Return (via *pixel) the root-mean-square of the samples in the window.

  Fix: the squared terms are now accumulated in double precision.  The
  original expression (p->nodes[color].count*color*color) was evaluated in
  size_t arithmetic before conversion; color may be up to 65535, so
  color*color alone (~4.3e9) overflows a 32-bit size_t and silently wraps.
*/
static inline void GetRootMeanSquarePixelList(PixelList *pixel_list,
  Quantum *pixel)
{
  double
    sum;

  register SkipList
    *p;

  size_t
    color;

  ssize_t
    count;

  /*
    Find the root mean square value for each of the color.
  */
  p=(&pixel_list->skip_list);
  color=65536L;
  count=0;
  sum=0.0;
  do
  {
    color=p->nodes[color].next[0];
    sum+=(double) p->nodes[color].count*(double) color*(double) color;
    count+=p->nodes[color].count;
  } while (count < (ssize_t) pixel_list->length);
  sum/=pixel_list->length;
  *pixel=ScaleShortToQuantum((unsigned short) sqrt(sum));
}
/*
  Return (via *pixel) the standard deviation of the samples in the window.

  Fix: the sum of squares is accumulated as count*color^2 in one double
  multiply per node.  The original looped once per sample occurrence
  (O(count) additions per node) to compute the identical quantity, making
  the statistic O(window-size) per node instead of O(1).
*/
static inline void GetStandardDeviationPixelList(PixelList *pixel_list,
  Quantum *pixel)
{
  double
    sum,
    sum_squared;

  register SkipList
    *p;

  size_t
    color;

  ssize_t
    count;

  /*
    Find the standard-deviation value for each of the color.
  */
  p=(&pixel_list->skip_list);
  color=65536L;
  count=0;
  sum=0.0;
  sum_squared=0.0;
  do
  {
    color=p->nodes[color].next[0];
    sum+=(double) p->nodes[color].count*color;
    sum_squared+=(double) p->nodes[color].count*(double) color*(double) color;
    count+=p->nodes[color].count;
  } while (count < (ssize_t) pixel_list->length);
  sum/=pixel_list->length;
  sum_squared/=pixel_list->length;
  /* Population variance: E[X^2] - (E[X])^2. */
  *pixel=ScaleShortToQuantum((unsigned short) sqrt(sum_squared-(sum*sum)));
}
/*
  Record one sample in the list: bump the count of an already-live node,
  otherwise link a fresh node for this value into the skip-list.
*/
static inline void InsertPixelList(const Quantum pixel,PixelList *pixel_list)
{
  unsigned short
    value;

  value=ScaleQuantumToShort(pixel);
  if (pixel_list->skip_list.nodes[value].signature == pixel_list->signature)
    {
      /* Node is live this generation: just increase its multiplicity. */
      pixel_list->skip_list.nodes[value].count++;
      return;
    }
  AddNodePixelList(pixel_list,value);
}
/*
  Logically empty the skip-list in O(1): point every sentinel link back at
  the sentinel and bump the generation signature so stale nodes are ignored
  (no need to clear all 65537 nodes).
*/
static void ResetPixelList(PixelList *pixel_list)
{
  SkipList
    *list;

  SkipNode
    *sentinel;

  ssize_t
    level;

  list=(&pixel_list->skip_list);
  sentinel=list->nodes+65536UL;
  list->level=0;
  for (level=0; level < 9; level++)
    sentinel->next[level]=65536UL;
  /* Re-seed the level PRNG from the new generation number. */
  pixel_list->seed=pixel_list->signature++;
}
/*
  StatisticImage(): replace each pixel with the requested statistic (min,
  max, median, mode, ...) of its width x height neighborhood, computed per
  channel with a per-thread counting skip-list.  Returns a new image or
  NULL on error.
*/
MagickExport Image *StatisticImage(const Image *image,const StatisticType type,
  const size_t width,const size_t height,ExceptionInfo *exception)
{
#define StatisticImageTag "Statistic/Image"

  CacheView
    *image_view,
    *statistic_view;

  Image
    *statistic_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelList
    **magick_restrict pixel_list;

  ssize_t
    center,
    y;

  /*
    Initialize statistics image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  statistic_image=CloneImage(image,0,0,MagickTrue,
    exception);
  if (statistic_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageStorageClass(statistic_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      statistic_image=DestroyImage(statistic_image);
      return((Image *) NULL);
    }
  /* Zero-sized neighborhoods are promoted to 1x1 throughout. */
  pixel_list=AcquirePixelListThreadSet(MagickMax(width,1),MagickMax(height,1));
  if (pixel_list == (PixelList **) NULL)
    {
      statistic_image=DestroyImage(statistic_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Make each pixel the min / max / median / mode / etc. of the neighborhood.
    'center' is the offset, within the padded virtual row block fetched
    below, of the pixel under the window's center.
  */
  center=(ssize_t) GetPixelChannels(image)*(image->columns+MagickMax(width,1))*
    (MagickMax(height,1)/2L)+GetPixelChannels(image)*(MagickMax(width,1)/2L);
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  statistic_view=AcquireAuthenticCacheView(statistic_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,statistic_image,statistic_image->rows,1)
#endif
  for (y=0; y < (ssize_t) statistic_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* Fetch rows padded by the window radius on all sides (virtual pixels
       supply the out-of-bounds samples). */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) MagickMax(width,1)/2L),y-
      (ssize_t) (MagickMax(height,1)/2L),image->columns+MagickMax(width,1),
      MagickMax(height,1),exception);
    q=QueueCacheViewAuthenticPixels(statistic_view,0,y,statistic_image->columns, 1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) statistic_image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        Quantum
          pixel;

        register const Quantum
          *magick_restrict pixels;

        register ssize_t
          u;

        ssize_t
          v;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait statistic_traits=GetPixelChannelTraits(statistic_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (statistic_traits == UndefinedPixelTrait))
          continue;
        /* Copy-through channels and write-masked pixels pass unchanged. */
        if (((statistic_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(image,p) <= (QuantumRange/2)))
          {
            SetPixelChannel(statistic_image,channel,p[center+i],q);
            continue;
          }
        if ((statistic_traits & UpdatePixelTrait) == 0)
          continue;
        /*
          Load the window's samples for this channel into the thread's
          skip-list.  Row stride of the padded block is
          columns+MagickMax(width,1): after the u-loop consumed width
          samples, the extra advance of 'columns' pixels reaches the next
          padded row.
        */
        pixels=p;
        ResetPixelList(pixel_list[id]);
        for (v=0; v < (ssize_t) MagickMax(height,1); v++)
        {
          for (u=0; u < (ssize_t) MagickMax(width,1); u++)
          {
            InsertPixelList(pixels[i],pixel_list[id]);
            pixels+=GetPixelChannels(image);
          }
          pixels+=GetPixelChannels(image)*image->columns;
        }
        switch (type)
        {
          case GradientStatistic:
          {
            double
              maximum,
              minimum;

            /* Gradient = |max - min| over the window. */
            GetMinimumPixelList(pixel_list[id],&pixel);
            minimum=(double) pixel;
            GetMaximumPixelList(pixel_list[id],&pixel);
            maximum=(double) pixel;
            pixel=ClampToQuantum(MagickAbsoluteValue(maximum-minimum));
            break;
          }
          case MaximumStatistic:
          {
            GetMaximumPixelList(pixel_list[id],&pixel);
            break;
          }
          case MeanStatistic:
          {
            GetMeanPixelList(pixel_list[id],&pixel);
            break;
          }
          case MedianStatistic:
          default:
          {
            GetMedianPixelList(pixel_list[id],&pixel);
            break;
          }
          case MinimumStatistic:
          {
            GetMinimumPixelList(pixel_list[id],&pixel);
            break;
          }
          case ModeStatistic:
          {
            GetModePixelList(pixel_list[id],&pixel);
            break;
          }
          case NonpeakStatistic:
          {
            GetNonpeakPixelList(pixel_list[id],&pixel);
            break;
          }
          case RootMeanSquareStatistic:
          {
            GetRootMeanSquarePixelList(pixel_list[id],&pixel);
            break;
          }
          case StandardDeviationStatistic:
          {
            GetStandardDeviationPixelList(pixel_list[id],&pixel);
            break;
          }
        }
        SetPixelChannel(statistic_image,channel,pixel,q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(statistic_image);
    }
    if (SyncCacheViewAuthenticPixels(statistic_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,StatisticImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  statistic_view=DestroyCacheView(statistic_view);
  image_view=DestroyCacheView(image_view);
  pixel_list=DestroyPixelListThreadSet(pixel_list);
  if (status == MagickFalse)
    statistic_image=DestroyImage(statistic_image);
  return(statistic_image);
}
|
GB_cast_array.c | //------------------------------------------------------------------------------
// GB_cast_array: typecast an array
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Casts an input array Ax to an output array Cx with a different type. The
// two types are always different, so this does not need to handle user-defined
// types. The iso case is not handled; Ax and Cx must be the same size and no
// iso expansion is done.
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_unop__include.h"
#endif
GB_PUBLIC
void GB_cast_array              // typecast an array
(
    GB_void *Cx,                // output array
    const GB_Type_code code1,   // type code for Cx
    GB_void *Ax,                // input array
    const GB_Type_code code2,   // type code for Ax
    const int8_t *restrict Ab,  // bitmap for Ax (NULL if Ax is dense)
    const int64_t anz,          // number of entries in Cx and Ax
    const int nthreads          // number of threads to use
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    if (anz == 0 || Cx == Ax)
    {
        // if anz is zero: no work to do, and the Ax and Cx pointer may be NULL
        // as well.  If Cx and Ax are aliased, then no copy is needed.
        return ;
    }

    ASSERT (Cx != NULL) ;
    ASSERT (Ax != NULL) ;
    ASSERT (anz > 0) ;
    ASSERT (GB_code_compatible (code1, code2)) ;
    ASSERT (code1 != code2) ;
    ASSERT (code1 != GB_UDT_code) ;

    //--------------------------------------------------------------------------
    // typecast the array
    //--------------------------------------------------------------------------

    #ifndef GBCOMPACT

        //----------------------------------------------------------------------
        // define the worker for the switch factory
        //----------------------------------------------------------------------

        // Each specialized identity-apply kernel returns from this function
        // on success; falling out of the factory reaches the generic worker.
        #define GB_unop_apply(zname,xname)                              \
            GB (_unop_apply__identity ## zname ## xname)

        #define GB_WORKER(ignore1,zname,ztype,xname,xtype)              \
        {                                                               \
            GrB_Info info = GB_unop_apply (zname,xname)                 \
                ((ztype *) Cx, (xtype *) Ax, Ab, anz, nthreads) ;       \
            if (info == GrB_SUCCESS) return ;                           \
        }                                                               \
        break ;

        //----------------------------------------------------------------------
        // launch the switch factory
        //----------------------------------------------------------------------

        // expands to a switch over (code1, code2), skipping equal types
        #define GB_EXCLUDE_SAME_TYPES
        #include "GB_2type_factory.c"

    #endif

    //--------------------------------------------------------------------------
    // generic worker: only used for GBCOMPACT case
    //--------------------------------------------------------------------------

    // NOTE(review): this path also runs if the factory above declines a
    // type pair; it casts entry-by-entry via a function pointer.

    int64_t csize = GB_code_size (code1, 0) ;
    int64_t asize = GB_code_size (code2, 0) ;
    GB_cast_function cast_A_to_C = GB_cast_factory (code1, code2) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap (GBB is true if Ab is NULL)
        if (!GBB (Ab, p)) continue ;
        // Cx [p] = Ax [p]
        cast_A_to_C (Cx +(p*csize), Ax +(p*asize), asize) ;
    }
}
|
nbody-block.c | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "timer.h"
#define CACHELINE 64 // size of cache line [bytes]
#define SOFTENING 1e-9f
typedef struct { float *x, *y, *z, *vx, *vy, *vz; } BodySystem;
/* Fill data[0..n) with uniform pseudo-random floats in [-1, 1] using rand(). */
void randomizeBodies(float *data, int n) {
  for (int k = 0; k < n; k++) {
    const float u = rand() / (float)RAND_MAX; /* u in [0, 1] */
    data[k] = 2.0f * u - 1.0f;                /* map to [-1, 1] */
  }
}
/*
 * Compute pairwise softened inverse-square forces in j-tiles (cache
 * blocking) and advance velocities by dt.  Adding dt*F per tile is
 * equivalent to dt times the full force sum, since each tile contributes
 * its partial force independently.
 */
void bodyForce(BodySystem p, float dt, int n, int tileSize) {
  for (int start = 0; start < n; start += tileSize) {
    int end = start + tileSize;
    if (end > n)
      end = n;
    #pragma omp parallel for schedule(dynamic)
    for (int i = 0; i < n; i++) {
      float fx = 0.0f, fy = 0.0f, fz = 0.0f;
      for (int j = start; j < end; j++) {
        const float dx = p.x[j] - p.x[i];
        const float dy = p.y[j] - p.y[i];
        const float dz = p.z[j] - p.z[i];
        /* SOFTENING keeps the i==j self-interaction finite. */
        const float distSqr = dx*dx + dy*dy + dz*dz + SOFTENING;
        const float invDist = 1.0f / sqrtf(distSqr);
        const float invDist3 = invDist * invDist * invDist;
        fx += dx * invDist3;
        fy += dy * invDist3;
        fz += dz * invDist3;
      }
      p.vx[i] += dt*fx;
      p.vy[i] += dt*fy;
      p.vz[i] += dt*fz;
    }
  }
}
/*
 * Driver: allocate the body system, randomize it, and time nIters steps of
 * bodyForce + position integration, reporting the average rate.
 *
 * Fixes vs. original:
 *  - the non-SHMOO summary printf referenced an undeclared variable `rate`
 *    and supplied one argument for a two-conversion format string
 *    ("%.3f +- %.3f"); it now computes and prints the rate correctly.
 *  - malloc() result is checked before use.
 */
int main(const int argc, const char** argv) {
  int nBodies = 30000;
  if (argc > 1) nBodies = atoi(argv[1]);

  int tileSize = 24400;                 /* j-loop blocking factor */
  if (tileSize > nBodies) tileSize = nBodies;

  const float dt = 0.01f; // time step
  const int nIters = 10;  // simulation iterations

  size_t bytes = 6*nBodies*sizeof(float);
  float *buf = (float*)malloc(bytes);
  if (buf == NULL) {
    fprintf(stderr, "Failed to allocate %zu bytes\n", bytes);
    return 1;
  }
  BodySystem p;
  p.x = buf+0*nBodies; p.y = buf+1*nBodies; p.z = buf+2*nBodies;
  p.vx = buf+3*nBodies; p.vy = buf+4*nBodies; p.vz = buf+5*nBodies;

  randomizeBodies(buf, 6*nBodies); // Init pos / vel data

  double totalTime = 0.0;
  for (int iter = 1; iter <= nIters; iter++) {
    StartTimer();

    bodyForce(p, dt, nBodies, tileSize); // compute interbody forces

    for (int i = 0 ; i < nBodies; i++) { // integrate position
      p.x[i] += p.vx[i]*dt;
      p.y[i] += p.vy[i]*dt;
      p.z[i] += p.vz[i]*dt;
    }

    const double tElapsed = GetTimer() / 1000.0;
    if (iter > 1) { // First iter is warm up
      totalTime += tElapsed;
    }
#ifndef SHMOO
    printf("Iteration %d: %.3f seconds\n", iter, tElapsed);
#endif
  }
  double avgTime = totalTime / (double)(nIters-1);

#ifdef SHMOO
  printf("%d, %0.3f\n", nBodies, 1e-9 * nBodies * nBodies / avgTime);
#else
  printf("Average rate for iterations 2 through %d: %.3f steps per second.\n",
         nIters, 1.0 / avgTime);
  printf("%d Bodies: average %0.3f Billion Interactions / second\n", nBodies, 1e-9 * nBodies * nBodies / avgTime);
#endif
  free(buf);
  return 0;
}
|
dotproduct_parallel.c | #include <omp.h>
#include <stdio.h>
#include <stdlib.h>
/* Define length of dot product vectors and number of OpenMP threads */
#define VECLEN 100
#define NUMTHREADS 8
/*
 * OpenMP dot-product demo: each thread accumulates into its private copy of
 * the reduction variable `sum`; `psum` snapshots that per-thread partial for
 * illustration before the reduction combines the copies.
 *
 * Fix vs. original: malloc() results are checked before the vectors are
 * written, and main returns an explicit status.
 */
int main (int argc, char* argv[])
{
   int i, tid, len=VECLEN, threads=NUMTHREADS;
   double *a, *b;
   double sum, psum;

   printf("Starting omp_dotprod_openmp. Using %d threads\n",threads);

   /* Assign storage for dot product vectors */
   a = (double*) malloc (len*threads*sizeof(double));
   b = (double*) malloc (len*threads*sizeof(double));
   if (a == NULL || b == NULL) {
      fprintf(stderr, "Failed to allocate dot product vectors\n");
      free(a);
      free(b);
      return 1;
   }

   /* Initialize dot product vectors */
   for (i=0; i<len*threads; i++) {
      a[i]=1.0;
      b[i]=a[i];
   }

   /* Initialize global sum */
   sum = 0.0;

   /*
      Perform the dot product in an OpenMP parallel region for loop with a sum reduction
      For illustration purposes:
        - Explicitly sets number of threads
        - Each thread keeps track of its partial sum
   */
#pragma omp parallel private(i,tid,psum) num_threads(threads)
   {
      psum = 0.0;
      tid = omp_get_thread_num();

#pragma omp for reduction(+:sum)
      for (i=0; i<len*threads; i++)
      {
         /* Inside the reduction, `sum` names this thread's private
            accumulator, so psum tracks the thread-local partial. */
         sum += (a[i] * b[i]);
         psum = sum;
      }
      printf("Thread %d partial sum = %f\n",tid, psum);
   }

   printf ("Done. OpenMP version: sum = %f \n", sum);

   free (a);
   free (b);
   return 0;
}
|
tparallel.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h> /* OpenMP */
/* Shared accumulators updated by the parallel regions in foo(). */
int first=0, second=0;
/*
 * Exercise several OpenMP parallel-region clauses (firstprivate, reduction,
 * if, num_threads); returns the (unchanged) local x.
 */
int foo() {
  int i, x = 1023;

  /* Each thread gets a private copy of x (firstprivate) and increments it;
     the reduction adds every thread's (x+1) into `first`.  The outer x is
     not modified by the region. */
  #pragma omp parallel firstprivate(x) reduction(+:first) if(x>0) num_threads(2)
  {
    x++;
    first += x;
  }

  /* if(0): the region executes with a single thread. */
  #pragma omp parallel firstprivate(x) reduction(+:first) if(0)
  {
    x++;
    first += x;
  }

  /* Each thread's private reduction copy of `second` is overwritten with the
     shared `first`, incremented 16 times per thread, then summed by the
     reduction. */
  #pragma omp parallel private(i) shared(first) reduction(+:second)
  {
    second = first;
    for (i = 0; i < 16; i++)
      second++;
  }

  omp_set_num_threads(6);
  #pragma omp parallel
  printf("Thread %d finished the execution of foo\n", omp_get_thread_num());
  return(x);
}
int main(int argc, char *argv[]) {
  /*
   * Fix: foo() mutates the globals `first` and `second`, and the original
   * code read those globals in the same printf argument list that called
   * foo().  C leaves the evaluation order of function arguments
   * unspecified, so the printed first/second could be either the pre- or
   * post-call values.  Sequencing the call before the reads makes the
   * output deterministic.
   */
  int x = foo();
  printf("first = %d, second = %d, x = %d\n", first, second, x);
  return 0;
}
|
pbkdf2-hmac-sha1_fmt_plug.c | /*
* This software is Copyright (c) 2013 magnum and it is hereby released to
* the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_pbkdf2_hmac_sha1;
#elif FMT_REGISTERS_H
john_register_one(&fmt_pbkdf2_hmac_sha1);
#else
#include <ctype.h>
#include <string.h>
#include <assert.h>
#include <stdint.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "johnswap.h"
#include "base64_convert.h"
#include "pbkdf2_hmac_sha1.h"
#include "pbkdf2_hmac_common.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 64
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "PBKDF2-HMAC-SHA1"
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME "PBKDF2-SHA1 " SHA1_ALGORITHM_NAME
#else
#define ALGORITHM_NAME "PBKDF2-SHA1 32/" ARCH_BITS_STR
#endif
#define BINARY_ALIGN sizeof(uint32_t)
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN sizeof(uint32_t)
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#define PAD_SIZE 64
#define PLAINTEXT_LENGTH 125
/* Per-hash salt data decoded from the ciphertext string by get_salt(). */
static struct custom_salt {
	unsigned int length;   /* salt length in bytes */
	unsigned int rounds;   /* PBKDF2 iteration count */
	unsigned int use_utf8; /* set when the 14th tag char is 'S' — presumably
	                          selects an encoding variant; confirm against
	                          pbkdf2_hmac_common */
	//unsigned int outlen; /* Not used yet */
	unsigned char salt[PBKDF2_32_MAX_SALT_SIZE];
} *cur_salt;

/* Candidate plaintexts and their computed digests, one slot per key index;
   allocated in init(), released in done(). */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)[PBKDF2_SHA1_BINARY_SIZE / sizeof(uint32_t)];
/*
 * Allocate the per-candidate key and digest buffers.  Under OpenMP the
 * minimum key count scales with the thread count and the maximum
 * additionally with OMP_SCALE, matching the original sizing.
 */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int threads = omp_get_max_threads();

	self->params.min_keys_per_crypt *= threads;
	self->params.max_keys_per_crypt *= threads * OMP_SCALE;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_out));
}
/* Release the buffers allocated in init(). */
static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}
/*
 * Decode a canonical ciphertext into a custom_salt: iteration count, a
 * variant flag, and the hex-encoded salt between the '$' delimiters.
 * Returns a pointer to a static struct (JtR convention; copied by caller).
 */
static void *get_salt(char *ciphertext)
{
	static struct custom_salt cs;
	char *p;
	int saltlen;

	memset(&cs, 0, sizeof(cs));
	ciphertext += PBKDF2_SHA1_TAG_LEN;
	/* Flag keyed off a fixed position in the header; see struct comment. */
	cs.use_utf8 = ciphertext[13] == 'S';
	cs.rounds = atou(ciphertext);
	ciphertext = strchr(ciphertext, '$') + 1;
	p = strchr(ciphertext, '$');
	saltlen = 0;
	memset(cs.salt, 0, sizeof(cs.salt));
	while (ciphertext < p) {        /** extract salt: hex pairs to bytes **/
		cs.salt[saltlen++] =
			atoi16[ARCH_INDEX(ciphertext[0])] * 16 +
			atoi16[ARCH_INDEX(ciphertext[1])];
		ciphertext += 2;
	}
	cs.length = saltlen;

	return (void *)&cs;
}
/* Install the salt to be used by subsequent crypt_all() calls. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}
/* Hash-table bucketing: successively wider low-bit masks of the digest's
   first 32-bit word. */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
/*
 * Compute PBKDF2-HMAC-SHA1 for all queued candidates against cur_salt,
 * writing digests to crypt_out.  Uses the SIMD multi-lane path when
 * SSE_GROUP_SZ_SHA1 is defined, one scalar call per key otherwise.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
#endif
/* NOTE(review): the following conditional block is empty — it looks like a
   leftover from a refactor and could be removed. */
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
#endif
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
	{
#ifdef SSE_GROUP_SZ_SHA1
		/* Gather one SIMD group's key pointers/lengths and digest slots. */
		int lens[SSE_GROUP_SZ_SHA1], i;
		unsigned char *pin[SSE_GROUP_SZ_SHA1];
		union {
			uint32_t *pout[SSE_GROUP_SZ_SHA1];
			unsigned char *poutc;
		} x;
		for (i = 0; i < SSE_GROUP_SZ_SHA1; ++i) {
			lens[i] = strlen(saved_key[index+i]);
			pin[i] = (unsigned char*)saved_key[index+i];
			x.pout[i] = crypt_out[index+i];
		}
		pbkdf2_sha1_sse((const unsigned char **)pin, lens,
			cur_salt->salt, cur_salt->length,
			cur_salt->rounds, &(x.poutc),
			PBKDF2_SHA1_BINARY_SIZE, 0);
#else
		pbkdf2_sha1((const unsigned char*)(saved_key[index]),
			strlen(saved_key[index]),
			cur_salt->salt, cur_salt->length,
			cur_salt->rounds, (unsigned char*)crypt_out[index],
			PBKDF2_SHA1_BINARY_SIZE, 0);
#endif
	}
	return count;
}
/*
 * Cheap screen: compare only the first ARCH_SIZE bytes of the binary
 * against every computed digest; cmp_one/cmp_exact do the full check.
 * When neither OpenMP nor multi-key crypts apply, only index 0 exists,
 * so the loop header is compiled out.
 */
static int cmp_all(void *binary, int count)
{
	int index = 0;
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
	for (; index < count; index++)
#endif
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}
/* Full-width digest comparison for a single candidate. */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], PBKDF2_SHA1_BINARY_SIZE);
}
static void set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
/* Return the stored candidate password for this key slot. */
static char *get_key(int index)
{
	return saved_key[index];
}
/* Check the FULL binary, just for good measure. There is no chance we'll
   have a false positive here but this function is not performance sensitive.
   Recomputes the PBKDF2 digest from the plaintext and compares it to the
   full ciphertext via the common helper. */
static int cmp_exact(char *source, int index)
{
	return pbkdf2_hmac_sha1_cmp_exact(get_key(index), source, cur_salt->salt, cur_salt->length, cur_salt->rounds);
}
/* Tunable-cost reporting hook: the salt's PBKDF2 iteration count. */
static unsigned int iteration_count(void *salt)
{
	return (unsigned int) ((struct custom_salt *) salt)->rounds;
}
/*
 * Format descriptor registered with John the Ripper: static parameters
 * followed by the method table wired to the functions above (with
 * fmt_default_* / pbkdf2_hmac_sha1_* helpers for the rest).
 */
struct fmt_main fmt_pbkdf2_hmac_sha1 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		PBKDF2_SHA1_BINARY_SIZE,
		PBKDF2_32_BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_SPLIT_UNIFIES_CASE,
		{
			/* tunable-cost label reported per salt */
			"iteration count",
		},
		{
			/* recognized ciphertext tag prefixes */
			PBKDF2_SHA1_FORMAT_TAG,
			PKCS5S2_TAG,
			PK5K2_TAG
		},
		pbkdf2_hmac_sha1_common_tests
	}, {
		init,
		done,
		fmt_default_reset,
		pbkdf2_hmac_sha1_prepare,
		pbkdf2_hmac_sha1_valid,
		pbkdf2_hmac_sha1_split,
		pbkdf2_hmac_sha1_binary,
		get_salt,
		{
			iteration_count,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
transform.h | /*!
* Copyright 2018 XGBoost contributors
*/
#ifndef XGBOOST_COMMON_TRANSFORM_H_
#define XGBOOST_COMMON_TRANSFORM_H_
#include <dmlc/omp.h>
#include <xgboost/data.h>
#include <utility>
#include <vector>
#include <type_traits> // enable_if
#include "host_device_vector.h"
#include "common.h"
#include "span.h"
#if defined (__CUDACC__)
#include "device_helpers.cuh"
#endif // defined (__CUDACC__)
namespace xgboost {
namespace common {
constexpr size_t kBlockThreads = 256;
namespace detail {
#if defined(__CUDACC__)
// Generic CUDA kernel: iterates dh::GridStrideRange over [_range.begin(),
// _range.end()) and invokes _func(i, spans...) for every index, forwarding
// the already-unpacked device spans as arguments.
template <typename Functor, typename... SpanType>
__global__ void LaunchCUDAKernel(Functor _func, Range _range,
                                 SpanType... _spans) {
  for (auto i : dh::GridStrideRange(*_range.begin(), *_range.end())) {
    _func(i, _spans...);
  }
}
#endif  // defined(__CUDACC__)
} // namespace detail
/*! \brief Do Transformation on HostDeviceVectors.
*
* \tparam CompiledWithCuda A bool parameter used to distinguish compilation
* trajectories, users do not need to use it.
*
* Note: Using Transform is a VERY tricky thing to do. Transform uses template
* argument to duplicate itself into two different types, one for CPU,
* another for CUDA. The trick is not without its flaw:
*
* If you use it in a function that can be compiled by both nvcc and host
* compiler, the behaviour is un-defined! Because your function is NOT
* duplicated by `CompiledWithCuda`. At link time, cuda compiler resolution
* will merge functions with same signature.
*/
template <bool CompiledWithCuda = WITH_CUDA()>
class Transform {
 private:
  // Helper returned by Transform::Init; captures the functor and the launch
  // configuration, and performs the CPU/CUDA dispatch in Eval().
  template <typename Functor>
  struct Evaluator {
   public:
    // Construct from a device set; sharding uses a block distribution.
    Evaluator(Functor func, Range range, GPUSet devices, bool shard) :
        func_(func), range_{std::move(range)},
        shard_{shard},
        distribution_{std::move(GPUDistribution::Block(devices))} {}
    // Construct from an explicit GPU distribution.
    Evaluator(Functor func, Range range, GPUDistribution dist,
              bool shard) :
        func_(func), range_{std::move(range)}, shard_{shard},
        distribution_{std::move(dist)} {}

    /*!
     * \brief Evaluate the functor with input pointers to HostDeviceVector.
     *
     * \tparam HDV...  HostDeviceVectors type.
     * \param  vectors Pointers to HostDeviceVector.
     */
    template <typename... HDV>
    void Eval(HDV... vectors) const {
      // An empty distribution means no devices were requested: run on CPU.
      bool on_device = !distribution_.IsEmpty();

      if (on_device) {
        LaunchCUDA(func_, vectors...);
      } else {
        LaunchCPU(func_, vectors...);
      }
    }

   private:
    // CUDA UnpackHDV: obtain the device span of a vector for one device.
    template <typename T>
    Span<T> UnpackHDV(HostDeviceVector<T>* _vec, int _device) const {
      auto span = _vec->DeviceSpan(_device);
      return span;
    }
    template <typename T>
    Span<T const> UnpackHDV(const HostDeviceVector<T>* _vec, int _device) const {
      auto span = _vec->ConstDeviceSpan(_device);
      return span;
    }
    // CPU UnpackHDV: wrap the host buffer in a span.
    template <typename T>
    Span<T> UnpackHDV(HostDeviceVector<T>* _vec) const {
      return Span<T> {_vec->HostPointer(),
            static_cast<typename Span<T>::index_type>(_vec->Size())};
    }
    template <typename T>
    Span<T const> UnpackHDV(const HostDeviceVector<T>* _vec) const {
      return Span<T const> {_vec->ConstHostPointer(),
            static_cast<typename Span<T>::index_type>(_vec->Size())};
    }
    // Recursive unpack for Shard: distribute every input vector across the
    // devices before launching kernels.
    template <typename T>
    void UnpackShard(GPUDistribution dist, const HostDeviceVector<T> *vector) const {
      vector->Shard(dist);
    }
    template <typename Head, typename... Rest>
    void UnpackShard(GPUDistribution dist,
                     const HostDeviceVector<Head> *_vector,
                     const HostDeviceVector<Rest> *... _vectors) const {
      _vector->Shard(dist);
      UnpackShard(dist, _vectors...);
    }

#if defined(__CUDACC__)
    // Launch one CUDA kernel per device, each covering its shard of the
    // index range.  Only instantiated when CompiledWithCuda is true.
    template <typename std::enable_if<CompiledWithCuda>::type* = nullptr,
              typename... HDV>
    void LaunchCUDA(Functor _func, HDV*... _vectors) const {
      if (shard_)
        UnpackShard(distribution_, _vectors...);

      GPUSet devices = distribution_.Devices();
      size_t range_size = *range_.end() - *range_.begin();

      // Extract index to deal with possible old OpenMP.
      size_t device_beg = *(devices.begin());
      size_t device_end = *(devices.end());
      // One host thread per device when more than one device is in use.
#pragma omp parallel for schedule(static, 1) if (devices.Size() > 1)
      for (omp_ulong device = device_beg; device < device_end; ++device) {  // NOLINT
        // Ignore other attributes of GPUDistribution for spliting index.
        // This deals with situation like multi-class setting where
        // granularity is used in data vector.
        size_t shard_size = GPUDistribution::Block(devices).ShardSize(
            range_size, devices.Index(device));
        Range shard_range {0, static_cast<Range::DifferenceType>(shard_size)};
        dh::safe_cuda(cudaSetDevice(device));
        const int GRID_SIZE =
            static_cast<int>(dh::DivRoundUp(*(range_.end()), kBlockThreads));

        detail::LaunchCUDAKernel<<<GRID_SIZE, kBlockThreads>>>(
            _func, shard_range, UnpackHDV(_vectors, device)...);
      }
    }
#else
    /*! \brief Dummy funtion defined when compiling for CPU. */
    template <typename std::enable_if<!CompiledWithCuda>::type* = nullptr,
              typename... HDV>
    void LaunchCUDA(Functor _func, HDV*... _vectors) const {
      LOG(FATAL) << "Not part of device code. WITH_CUDA: " << WITH_CUDA();
    }
#endif  // defined(__CUDACC__)

    // Run the functor over the whole range on the host, parallelized with
    // OpenMP; spans wrap the host-side buffers.
    template <typename... HDV>
    void LaunchCPU(Functor func, HDV*... vectors) const {
      omp_ulong end = static_cast<omp_ulong>(*(range_.end()));
#pragma omp parallel for schedule(static)
      for (omp_ulong idx = 0; idx < end; ++idx) {
        func(idx, UnpackHDV(vectors)...);
      }
    }

   private:
    /*! \brief Callable object. */
    Functor func_;
    /*! \brief Range object specifying parallel threads index range. */
    Range range_;
    /*! \brief Whether sharding for vectors is required. */
    bool shard_;
    /*! \brief How work is distributed over the selected GPUs. */
    GPUDistribution distribution_;
  };

 public:
  /*!
   * \brief Initialize a Transform object.
   *
   * \tparam Functor A callable object type.
   * \return A Evaluator having one method Eval.
   *
   * \param func A callable object, accepting a size_t thread index,
   *             followed by a set of Span classes.
   * \param range Range object specifying parallel threads index range.
   * \param devices GPUSet specifying GPUs to use, when compiling for CPU,
   *                this should be GPUSet::Empty().
   * \param shard Whether Shard for HostDeviceVector is needed.
   */
  template <typename Functor>
  static Evaluator<Functor> Init(Functor func, Range const range,
                                 GPUSet const devices,
                                 bool const shard = true) {
    return Evaluator<Functor> {func, std::move(range), std::move(devices), shard};
  }
  // Overload taking an explicit GPUDistribution instead of a GPUSet.
  template <typename Functor>
  static Evaluator<Functor> Init(Functor func, Range const range,
                                 GPUDistribution const dist,
                                 bool const shard = true) {
    return Evaluator<Functor> {func, std::move(range), std::move(dist), shard};
  }
};
} // namespace common
} // namespace xgboost
#endif // XGBOOST_COMMON_TRANSFORM_H_
|
pr27388-3.c | /* PR middle-end/27388 */
/* { dg-do compile } */
/* { dg-options "-fopenmp -fdump-tree-omplower" } */
extern void bar (int);
/* Testcase body: "i" is firstprivate and "j" private on the parallel
   region, and both are then used as iteration variables of "omp for"
   loops.  The dg-final scan below verifies that the omplower dump emits
   no "private" clause on the "omp for" directives themselves.  */
void
foo (void)
{
  int i = 0, j = 0;
#pragma omp parallel firstprivate (i) private (j)
  {
#pragma omp for
    for (i = 0; i < 2; i++)
      bar (i);
#pragma omp for
    for (j = 0; j < 2; j++)
      bar (j);
  }
}
/* { dg-final { scan-tree-dump-times "omp for\[^\\n\]*private" 0 "omplower" } } */
/* { dg-final { cleanup-tree-dump "omplower" } } */
|
GB_unop__expm1_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__expm1_fc32_fc32
// op(A') function: GB_unop_tran__expm1_fc32_fc32
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = GB_cexpm1f (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_cexpm1f (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = GB_cexpm1f (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_EXPM1 || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply z = GB_cexpm1f (x) elementwise: Cx [p] = op (Ax [p]) for all anz
// entries.  Returns GrB_NO_VALUE when the operator/type is disabled via
// GB_DISABLE, GrB_SUCCESS otherwise.
GrB_Info GB_unop_apply__expm1_fc32_fc32
(
    GxB_FC32_t *Cx,         // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // each entry is independent, so a static schedule parallelizes cleanly
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GxB_FC32_t aij = Ax [p] ;
        GxB_FC32_t z = aij ;
        Cx [p] = GB_cexpm1f (z) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose A, typecast, and apply the unary operator.  The loop body is
// not written here: GB_unop_transpose.c is textually included and expands
// in terms of the GB_* macros defined earlier in this file.
GrB_Info GB_unop_tran__expm1_fc32_fc32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
|
trmv_x_sky_u_lo_conj.c | #include "alphasparse/kernel.h"
#include "alphasparse/opt.h"
#include "alphasparse/util.h"
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/*
 * Skyline (SKY) triangular matrix-vector product with conjugation and a
 * unit diagonal: computes y := alpha * op(A) * x + beta * y, where op
 * presumably conjugates the stored values via alpha_mul_3c (file name
 * says "conj" — confirm against the alpha_mul_3c definition).
 * Only the initial scaling of y is parallelized; the accumulation over
 * skyline sections runs sequentially.
 */
static alphasparse_status_t ONAME_omp(const ALPHA_Number alpha,
                                      const ALPHA_SPMAT_SKY *A,
                                      const ALPHA_Number *x,
                                      const ALPHA_Number beta,
                                      ALPHA_Number *y)
{
#ifdef COMPLEX
    const ALPHA_INT m = A->rows;
    const ALPHA_INT n = A->cols;
    /* triangular kernels require a square matrix */
    if(m != n) return ALPHA_SPARSE_STATUS_INVALID_VALUE;
    const ALPHA_INT thread_num = alpha_get_thread_num();
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for(ALPHA_INT i = 0; i < m; ++i)
    {
        /* y[i] = beta * y[i] */
        alpha_mul(y[i], beta, y[i]);
    }
    for(ALPHA_INT c = 0; c < n; ++c)
    {
        /* pointers[c] .. pointers[c+1] delimit stored section c */
        const ALPHA_INT col_start = A->pointers[c];
        const ALPHA_INT col_end = A->pointers[c + 1];
        ALPHA_INT col_indx = 1;
        for(ALPHA_INT ai = col_start; ai < col_end; ++ai)
        {
            /* map the ai-th element of the section to its row index:
               the section ends at the diagonal, so r runs up to c */
            ALPHA_INT col_eles = col_end - col_start;
            ALPHA_INT r = c - col_eles + col_indx;
            if(ai == col_end - 1)
            {
                /* last element of a section is the diagonal; unit
                   diagonal means the stored value is ignored and
                   y[r] += alpha * x[c] */
                alpha_madde(y[r], alpha, x[c]);
            }
            else
            {
                /* y[r] += (alpha (*) A->values[ai]) * x[c] */
                ALPHA_Number t;
                alpha_mul_3c(t, alpha, A->values[ai]);
                alpha_madde(y[r], t, x[c]);
            }
            col_indx ++;
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
#else
    return ALPHA_SPARSE_STATUS_INVALID_VALUE;
#endif
}
/*
 * Public entry point: forwards to the OpenMP implementation.  This kernel
 * is only meaningful for complex element types; when COMPLEX is not
 * defined it reports ALPHA_SPARSE_STATUS_INVALID_VALUE.
 */
alphasparse_status_t
ONAME(const ALPHA_Number alpha,
      const ALPHA_SPMAT_SKY *A,
      const ALPHA_Number *x,
      const ALPHA_Number beta,
      ALPHA_Number *y)
{
#ifdef COMPLEX
    return ONAME_omp(alpha, A, x, beta, y);
#else
    return ALPHA_SPARSE_STATUS_INVALID_VALUE;
#endif
}
|
as400_des_fmt_plug.c | // AS/400 DES plugin for JtR
// This software is Copyright (c) 2016 Rob Schoemaker (@5up3rUs3r) and Bart Kulach (@bartholozz)
// and it is hereby released to the general public under the following terms:
// Redistribution and use in source and binary forms, with or without
// modification, are permitted.
//
// See http://www.hackthelegacy.org for details and tooling to retrieve hashes from AS/400 systems
//
// Based on RACF cracker patch for JtR by Dhiru Kholia <dhiru.kholia at gmail.com>,
// Nigel Pentland <nigel at nigelpentland.net> and Main Framed <mainframed767 at gmail.com>.
//
// file format => userid:$as400des$*userid*hash
#if FMT_EXTERNS_H
extern struct fmt_main fmt_as400des;
#elif FMT_REGISTERS_H
john_register_one(&fmt_as400des);
#else
#include <openssl/des.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "crc32.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "memdbg.h"
#define FORMAT_LABEL "as400-des"
#define FORMAT_NAME "AS/400 DES"
#define FORMAT_TAG "$as400des$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "DES 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 10 /* passwords can not be longer than 10 characters*/
#define CIPHERTEXT_LENGTH 16
#define BINARY_SIZE 8
#define SALT_SIZE sizeof(struct custom_salt)
#define BINARY_ALIGN sizeof(ARCH_WORD_32)
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
static const unsigned char a2e[256] = {
0, 1, 2, 3, 55, 45, 46, 47, 22, 5, 37, 11, 12, 13, 14, 15,
16, 17, 18, 19, 60, 61, 50, 38, 24, 25, 63, 39, 28, 29, 30, 31,
64, 79,127,123, 91,108, 80,125, 77, 93, 92, 78,107, 96, 75, 97,
240,241,242,243,244,245,246,247,248,249,122, 94, 76,126,110,111,
124,193,194,195,196,197,198,199,200,201,209,210,211,212,213,214,
215,216,217,226,227,228,229,230,231,232,233, 74,224, 90, 95,109,
121,129,130,131,132,133,134,135,136,137,145,146,147,148,149,150,
151,152,153,162,163,164,165,166,167,168,169,192,106,208,161, 7,
32, 33, 34, 35, 36, 21, 6, 23, 40, 41, 42, 43, 44, 9, 10, 27,
48, 49, 26, 51, 52, 53, 54, 8, 56, 57, 58, 59, 4, 20, 62,225,
65, 66, 67, 68, 69, 70, 71, 72, 73, 81, 82, 83, 84, 85, 86, 87,
88, 89, 98, 99,100,101,102,103,104,105,112,113,114,115,116,117,
118,119,120,128,138,139,140,141,142,143,144,154,155,156,157,158,
159,160,170,171,172,173,174,175,176,177,178,179,180,181,182,183,
184,185,186,187,188,189,190,191,202,203,204,205,206,207,218,219,
220,221,222,223,234,235,236,237,238,239,250,251,252,253,254,255
};
/* This is a2e[] with each entry XOR 0x55, left-shifted one bit
and finally with odd parity so that DES_set_key_unchecked
can be used directly. This provides about 15% speed up. */
static const unsigned char a2e_precomputed[256] = {
171, 168, 174, 173, 196, 241, 247, 244, 134, 161, 224, 188, 179, 176, 182, 181,
138, 137, 143, 140, 211, 208, 206, 230, 155, 152, 213, 229, 146, 145, 151, 148,
42, 52, 84, 93, 28, 115, 11, 81, 49, 16, 19, 55, 124, 107, 61, 104,
74, 73, 79, 76, 67, 64, 70, 69, 91, 88, 94, 22, 50, 87, 118, 117,
82, 41, 47, 44, 35, 32, 38, 37, 59, 56, 8, 14, 13, 2, 1, 7,
4, 26, 25, 110, 109, 98, 97, 103, 100, 122, 121, 62, 107, 31, 21, 112,
88, 168, 174, 173, 162, 161, 167, 164, 186, 185, 137, 143, 140, 131, 128, 134,
133, 155, 152, 239, 236, 227, 224, 230, 229, 251, 248, 42, 127, 11, 233, 164,
234, 233, 239, 236, 227, 128, 167, 133, 251, 248, 254, 253, 242, 185, 191, 157,
203, 200, 158, 205, 194, 193, 199, 186, 218, 217, 223, 220, 162, 131, 214, 104,
41, 47, 44, 35, 32, 38, 37, 59, 56, 8, 14, 13, 2, 1, 7, 4,
26, 25, 110, 109, 98, 97, 103, 100, 122, 121, 74, 73, 79, 76, 67, 64,
70, 69, 91, 171, 191, 188, 179, 176, 182, 181, 138, 158, 157, 146, 145, 151,
148, 234, 254, 253, 242, 241, 247, 244, 203, 200, 206, 205, 194, 193, 199, 196,
218, 217, 223, 220, 211, 208, 214, 213, 62, 61, 50, 49, 55, 52, 31, 28,
19, 16, 22, 21, 127, 124, 115, 112, 118, 117, 94, 93, 82, 81, 87, 84
};
/* Translate a NUL-terminated string from ASCII to EBCDIC in place,
   one byte at a time via the a2e lookup table. */
static void ascii2ebcdic(unsigned char *str)
{
	unsigned char *p;

	for (p = str; *p; p++)
		*p = a2e[*p];
}
/* format userid to conform to spec: normalize an EBCDIC userid to the
   fixed 8-byte DES input.  Up to 8 characters: right-pad with EBCDIC
   spaces (0x40).  9 or 10 characters: fold bytes 9 and 10 into the first
   8 by XORing 2-bit groups of each extra byte into consecutive bytes
   (the "bitswitch" operation of the AS/400 scheme). */
static void process_userid(unsigned char *str)
{
	int i;
	if(strlen((const char*)str)<=8) // if length userid <8 --> rightpad with spaces
	{
		for (i = strlen((const char*)str); i < 8; ++i)
			str[i] = 0x40;
		str[8] = 0; /* terminate string */
	}
	else
	{
		// if length userid is 9 or 10 --> do bitswitch operation to create userid of length 8
		// if length=9, right pad with spaces to length of 10
		if(strlen((const char*)str)==9)
		{
			str[9] = 0x40;
			str[10] = 0;
		}
		/* spread the top-to-bottom bit pairs of byte 9 over bytes 0-3 */
		str[0] ^= str[8] & 0xC0;
		str[1] ^= (str[8] & 0x30) << 2;
		str[2] ^= (str[8] & 0x0C) << 4;
		str[3] ^= str[8] << 6;
		/* and the bit pairs of byte 10 over bytes 4-7 */
		str[4] ^= str[9] & 0xC0;
		str[5] ^= (str[9] & 0x30) << 2;
		str[6] ^= (str[9] & 0x0C) << 4;
		str[7] ^= str[9] << 6;
		str[8] = 0; /* terminate string */
	}
}
static struct fmt_tests as400_des_tests[] = {
{"$as400des$AAAAAAA*CA2E330B2FD1820E", "AAAAAAAA"},
{"$as400des$AAAAAAAA*062314297C496E0E", "AAAAAAAA"},
{"$as400des$JJJJJJJJ*8B5F0B1D0826D927", "TESTTEST"},
{"$as400des$TTTTTTTT*424B258AF8B9061B", "TESTTEST"},
{"$as400des$A*0F7DE80335E8ED68", "A"},
{"$as400des$OPEN3*EC76FC0DEF5B0A83", "SYS1"},
{"$as400des$TESTTEST*0FF48804F759193F", "TESTTEST"},
{"$as400des$SYSOPR*83845F8EEC7C20D8", "SYSOPR"},
{"$as400des$TCPIP*657889CD0F5D40DF", "SYS1"},
{"$as400des$TESTER*E05AB770EA048421", "TEST"},
{"$as400des$USERNAME*48E3704CF54B79B2", "PASSWORD10"},
{"$as400des$USERNAME*760FECC695DF4E27", "PASSWORD"},
{"$as400des$USERID*AB171E6EE8E98037", "PASSWORD9"},
{"$as400des$ROB*D09E49EB96A1BE45", "SCHOEMAKER"},
{"$as400des$BART*25C0729AA18D1929", "KULACH"},
{NULL}
};
/* Per-hash salt: the processed (EBCDIC-converted, folded) userid that is
   fed to DES as plaintext. */
static struct custom_salt {
	unsigned char userid[10 + 1];
} *cur_salt;
/* Per-candidate plaintext and DES output buffers, indexed by key slot. */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
/* Allocate the per-candidate key and result buffers for this session. */
static void init(struct fmt_main *self)
{
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*crypt_out));
}
/* Release the buffers allocated in init(). */
static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}
/*
 * Syntax check for one ciphertext line of the form
 *   $as400des$<userid>*<16 hex digit hash>
 * Returns 1 when well formed, 0 otherwise.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy;
	char *keeptr;
	char *p;
	int extra;
	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
		return 0;
	/* work on a copy: strtokm modifies its argument */
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += FORMAT_TAG_LEN;
	p = strtokm(ctcopy, "*"); /* username */
	if(!p)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL) /* hash */
		goto err;
	/* hash must be exactly CIPHERTEXT_LENGTH hex digits and nothing more */
	if (hexlenu(p, &extra) != CIPHERTEXT_LENGTH || extra)
		goto err;
	MEM_FREE(keeptr);
	return 1;
err:
	MEM_FREE(keeptr);
	return 0;
}
/*
 * Extract the userid from a ciphertext line and normalize it into the
 * salt: copy at most 10 characters, convert to EBCDIC, then fold/pad to
 * the 8-byte DES input via process_userid().  Returns a pointer to a
 * static custom_salt, which the caller copies (SALT_SIZE bytes).
 */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy, *username;
	static struct custom_salt cs;
	ctcopy += FORMAT_TAG_LEN; /* skip over "$as400des$" */
	username = strtokm(ctcopy, "*");
	/* process username */
	strncpy((char*)cs.userid, username, 10);
	cs.userid[10] = 0; // terminate username at 10 bytes
	ascii2ebcdic(cs.userid);
	process_userid(cs.userid);
	MEM_FREE(keeptr);
	return (void *)&cs;
}
/* Decode the 16 hex digits after the last '*' into BINARY_SIZE raw
   bytes.  Returns a pointer to a static, alignment-forced buffer. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE+1];
		ARCH_WORD dummy;	/* forces alignment of c[] */
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;
	p = strrchr(ciphertext, '*') + 1;
	for (i = 0; i < BINARY_SIZE; i++) {
		/* two hex digits -> one byte */
		out[i] =
			(atoi16[ARCH_INDEX(*p)] << 4) |
			atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	return out;
}
/* Partial-hash accessors: low PH_MASK_n bits of the first 32-bit word of
   each computed hash, used by the cracker's lookup tables. */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
/* Select the salt (processed userid) used by subsequent crypt_all calls. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}
/*
 * Compute the AS/400 DES hash for every queued candidate.
 *
 * Passwords of up to 8 characters: the translated password bytes (via
 * a2e_precomputed, padded with 0x2a — EBCDIC space pre-XORed/shifted)
 * form one DES key that encrypts the processed userid.  Longer passwords
 * are split into two 8-byte keys and the two encryptions of the userid
 * are XORed together.
 *
 * Returns the number of candidates processed.
 *
 * FIX: the "for" loop itself used to sit inside #ifdef _OPENMP, so
 * builds without OpenMP executed the body exactly once (index 0 only).
 * Only the pragma is conditional now; the loop always covers all
 * candidates regardless of how the plugin is built.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++)
	{
		DES_cblock des_key;
		DES_key_schedule schedule;
		int i;
		int saved_key_length = strlen(saved_key[index]);

		if (saved_key_length <= 8) {
			/* process key */
			for(i = 0; saved_key[index][i]; i++)
				des_key[i] = a2e_precomputed[ARCH_INDEX(saved_key[index][i])];
			/* replace missing characters in password by (EBCDIC space (0x40) XOR 0x55) << 1 */
			while(i < 8)
				des_key[i++] = 0x2a;
			DES_set_key_unchecked(&des_key, &schedule);
			/* do encryption */
			DES_ecb_encrypt((const_DES_cblock*)cur_salt->userid, (DES_cblock*)crypt_out[index], &schedule, DES_ENCRYPT);
		}
		else {
			/* 9-10 character password: two keys, XOR of two encryptions */
			DES_cblock des_key1, des_key2;
			DES_key_schedule schedule1, schedule2;
			DES_cblock hash_1, hash_2;
			unsigned char output[8];

			/* process key */
			for(i = 0; i < 8; i++)
				des_key1[i] = a2e_precomputed[ARCH_INDEX(saved_key[index][i])];
			for(i = 0; i < saved_key_length-8; i++)
				des_key2[i] = a2e_precomputed[ARCH_INDEX(saved_key[index][8+i])];
			/* replace missing characters in password by (EBCDIC space (0x40) XOR 0x55) << 1 */
			while(i < 8)
				des_key2[i++] = 0x2a;
			DES_set_key_unchecked(&des_key1, &schedule1);
			DES_ecb_encrypt((const_DES_cblock*)cur_salt->userid, &hash_1, &schedule1, DES_ENCRYPT);
			DES_set_key_unchecked(&des_key2, &schedule2);
			DES_ecb_encrypt((const_DES_cblock*)cur_salt->userid, &hash_2, &schedule2, DES_ENCRYPT);
			for (i = 0; i < 8; i++) {
				output[i] = hash_1[i] ^ hash_2[i];
			}
			memcpy((unsigned char*)crypt_out[index], output, 8);
		}
	}
	return count;
}
/* Fast scan: return 1 if any computed hash matches the target binary.
   NOTE(review): this compares ARCH_SIZE bytes, not BINARY_SIZE; on
   32-bit targets that is only the first 4 bytes, which works as a
   filter because cmp_one() re-checks the full BINARY_SIZE — confirm
   ARCH_SIZE <= BINARY_SIZE on all supported targets. */
static int cmp_all(void *binary, int count)
{
	int index = 0;
	for (; index < count; index++)
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}
/* Compare the full BINARY_SIZE bytes for one candidate. */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
/* Nothing further to verify: cmp_one already compared all BINARY_SIZE
   bytes of the hash. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
static void as400_des_set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
/* Return the stored plaintext candidate for this key slot. */
static char *get_key(int index)
{
	return saved_key[index];
}
/* John the Ripper format descriptor: static parameters + method table. */
struct fmt_main fmt_as400des = {
	{
		/* Format parameters: labels, sizes, alignments, key limits. */
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_TRUNC | FMT_8_BIT,
		{ NULL },		/* no tunable cost names */
		{ FORMAT_TAG },		/* recognized ciphertext prefix */
		as400_des_tests
	}, {
		/* Method table: lifecycle, parsing, hashing, comparison hooks. */
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{ NULL },		/* no tunable cost extractors */
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		as400_des_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
colormap.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO L OOO RRRR M M AAA PPPP %
% C O O L O O R R MM MM A A P P %
% C O O L O O RRRR M M M AAAAA PPPP %
% C O O L O O R R M M A A P %
% CCCC OOO LLLLL OOO R R M M A A P %
% %
% %
% MagickCore Colormap Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% We use linked-lists because splay-trees do not currently support duplicate
% key / value pairs (.e.g X11 green compliance and SVG green compliance).
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/attribute.h"
#include "magick/blob.h"
#include "magick/cache-view.h"
#include "magick/cache.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/client.h"
#include "magick/configure.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/image-private.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/quantize.h"
#include "magick/quantum.h"
#include "magick/semaphore.h"
#include "magick/resource_.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/token.h"
#include "magick/utility.h"
#include "magick/xml-tree.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImageColormap() allocates an image colormap and initializes
% it to a linear gray colorspace. If the image already has a colormap,
% it is replaced. AcquireImageColormap() returns MagickTrue if successful,
% otherwise MagickFalse if there is not enough memory.
%
% The format of the AcquireImageColormap method is:
%
% MagickBooleanType AcquireImageColormap(Image *image,const size_t colors)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colors: the number of colors in the image colormap.
%
*/
MagickExport MagickBooleanType AcquireImageColormap(Image *image,
  const size_t colors)
{
  register ssize_t
    i;

  /*
    Allocate image colormap.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (colors > MaxColormapSize)
    {
      /*
        Request exceeds the colormap limit: revert to DirectClass and fail.
      */
      image->colors=0;
      image->storage_class=DirectClass;
      ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Allocate (or grow) the colormap with at least one entry, plus one
    extra slot beyond image->colors.
  */
  image->colors=MagickMax(colors,1);
  if (image->colormap == (PixelPacket *) NULL)
    image->colormap=(PixelPacket *) AcquireQuantumMemory(image->colors+1,
      sizeof(*image->colormap));
  else
    image->colormap=(PixelPacket *) ResizeQuantumMemory(image->colormap,
      image->colors+1,sizeof(*image->colormap));
  if (image->colormap == (PixelPacket *) NULL)
    {
      image->colors=0;
      image->storage_class=DirectClass;
      ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Initialize each entry to a linear gray ramp (equal R/G/B, opaque).
  */
  for (i=0; i < (ssize_t) image->colors; i++)
  {
    size_t
      pixel;

    pixel=(size_t) (i*(QuantumRange/MagickMax(colors-1,1)));
    image->colormap[i].red=(Quantum) pixel;
    image->colormap[i].green=(Quantum) pixel;
    image->colormap[i].blue=(Quantum) pixel;
    image->colormap[i].opacity=OpaqueOpacity;
  }
  return(SetImageStorageClass(image,PseudoClass));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C y c l e C o l o r m a p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CycleColormap() displaces an image's colormap by a given number of
% positions. If you cycle the colormap a number of times you can produce
% a psychodelic effect.
%
% The format of the CycleColormapImage method is:
%
% MagickBooleanType CycleColormapImage(Image *image,const ssize_t displace)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o displace: displace the colormap this amount.
%
*/
MagickExport MagickBooleanType CycleColormapImage(Image *image,
  const ssize_t displace)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    A colormap is required: convert DirectClass images to a palette first.
  */
  if (image->storage_class == DirectClass)
    (void) SetImageType(image,PaletteType);
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    ssize_t
      index;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Rotate each pixel's colormap index by displace, wrapping modulo
        image->colors; C's % may be negative, hence the correction.
      */
      index=(ssize_t) (GetPixelIndex(indexes+x)+displace) %
        image->colors;
      if (index < 0)
        index+=(ssize_t) image->colors;
      SetPixelIndex(indexes+x,index);
      SetPixelRGBO(q,image->colormap+(ssize_t) index);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S o r t C o l o r m a p B y I n t e n s i t y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SortColormapByIntensity() sorts the colormap of a PseudoClass image by
% decreasing color intensity.
%
% The format of the SortColormapByIntensity method is:
%
% MagickBooleanType SortColormapByIntensity(Image *image)
%
% A description of each parameter follows:
%
% o image: A pointer to an Image structure.
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

/*
  qsort() comparator: orders PixelPacket entries so higher-intensity
  colors sort first (decreasing intensity).
*/
static int IntensityCompare(const void *x,const void *y)
{
  const PixelPacket
    *a,
    *b;

  a=(const PixelPacket *) x;
  b=(const PixelPacket *) y;
  return((int) (PixelPacketIntensity(b)-(int) PixelPacketIntensity(a)));
}

#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
MagickExport MagickBooleanType SortColormapByIntensity(Image *image)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    y;

  unsigned short
    *pixels;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /*
    Only PseudoClass images have a colormap to sort.
  */
  if (image->storage_class != PseudoClass)
    return(MagickTrue);
  exception=(&image->exception);
  /*
    Allocate memory for pixel indexes.
  */
  pixels=(unsigned short *) AcquireQuantumMemory((size_t) image->colors,
    sizeof(*pixels));
  if (pixels == (unsigned short *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Assign index values to colormap entries: stash each entry's original
    position in its opacity field so it survives the sort.
  */
  for (i=0; i < (ssize_t) image->colors; i++)
    image->colormap[i].opacity=(IndexPacket) i;
  /*
    Sort image colormap by decreasing color popularity.
  */
  qsort((void *) image->colormap,(size_t) image->colors,
    sizeof(*image->colormap),IntensityCompare);
  /*
    Update image colormap indexes to sorted colormap order: pixels[old]
    gives the new position of the entry that used to sit at "old".
  */
  for (i=0; i < (ssize_t) image->colors; i++)
    pixels[(ssize_t) image->colormap[i].opacity]=(unsigned short) i;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
  /*
    Remap every pixel's index through the old-to-new table.
  */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    IndexPacket
      index;

    register ssize_t
      x;

    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      index=(IndexPacket) pixels[(ssize_t) GetPixelIndex(indexes+x)];
      SetPixelIndex(indexes+x,index);
      SetPixelRGBO(q,image->colormap+(ssize_t) index);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (status == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  pixels=(unsigned short *) RelinquishMagickMemory(pixels);
  return(status);
}
|
GB_binop__le_uint8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__le_uint8)
// A.*B function (eWiseMult): GB (_AemultB_08__le_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__le_uint8)
// A.*B function (eWiseMult): GB (_AemultB_04__le_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__le_uint8)
// A*D function (colscale): GB (_AxD__le_uint8)
// D*A function (rowscale): GB (_DxB__le_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__le_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__le_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__le_uint8)
// C=scalar+B GB (_bind1st__le_uint8)
// C=scalar+B' GB (_bind1st_tran__le_uint8)
// C=A+scalar GB (_bind2nd__le_uint8)
// C=A'+scalar GB (_bind2nd_tran__le_uint8)
// C type: bool
// A type: uint8_t
// A pattern? 0
// B type: uint8_t
// B pattern? 0
// BinaryOp: cij = (aij <= bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x <= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LE || GxB_NO_UINT8 || GxB_NO_LE_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// Compiled out: LE is a comparator, not one of the accumulator ops listed
// above, so no dense ewise3-accum kernel exists for this operator (hence
// the placeholder name "(none)").
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with C, A, B all dense; the included template applies
// cij = (aij <= bij) (see GB_BINOP above) to every position.
void GB (_Cdense_ewise3_noaccum__le_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B (sparse B accumulated into dense C).
GrB_Info GB (_Cdense_accumB__le_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
// operator disabled at compile time (see GB_DISABLE above); caller
// falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
// The accumulation template is compiled out (#if 0) for this operator:
// C += B with the LE comparator is not generated, so this kernel is a
// no-op that reports success.
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b (scalar b accumulated into dense C).
GrB_Info GB (_Cdense_accumb__le_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
// operator disabled at compile time; use the generic kernel instead
return (GrB_NO_VALUE) ;
#else
// Like _Cdense_accumB above, the scalar-accumulation template is
// compiled out (#if 0) for the LE comparator; this is a success no-op.
#if 0
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale each column of A by the corresponding diagonal entry of D.
GrB_Info GB (_AxD__le_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// output values of C, of type bool (GB_CTYPE); filled by the template
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale each row of B by the corresponding diagonal entry of D.
GrB_Info GB (_DxB__le_uint8)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// output values of C, of type bool (GB_CTYPE); filled by the template
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B, with cij = (aij <= bij)
// applied where entries overlap (template handles the union pattern).
GrB_Info GB (_AaddB__le_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// per-matrix slicing workspaces, freed by GB_FREE_WORKSPACE below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint8_t alpha_scalar ;
uint8_t beta_scalar ;
// For GxB_eWiseUnion, alpha/beta replace missing entries of A/B; they
// are only read from the inputs when is_eWiseUnion is true.
if (is_eWiseUnion)
{
alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C=A.*B (optionally masked) where C is sparse/hyper.
GrB_Info GB (_AemultB_08__le_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: A is sparse/hyper, B is bitmap/full.
// GB_BINOP_FLIP is 0 for LE (see #define above), so only the "#else"
// unflipped branch below is compiled for this operator.
GrB_Info GB (_AemultB_02__le_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: M sparse/hyper, A and B both bitmap/full.
GrB_Info GB (_AemultB_04__le_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult where the result C is a bitmap matrix.
GrB_Info GB (_AemultB_bitmap__le_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x <= Bx [p]) for every entry present in B, with the scalar x
// bound as the first operand. Cx and Bx may alias (same-size in-place apply).
GrB_Info GB (_bind1st__le_uint8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    bool *Cx = (bool *) Cx_output ;
    const uint8_t x = (*((uint8_t *) x_input)) ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        // skip positions with no entry (bitmap case); GBB is always true
        // for full matrices
        if (GBB (Bb, k))
        {
            uint8_t bij = GBX (Bx, k, false) ;
            Cx [k] = (x <= bij) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] <= y) for every entry present in A, with the scalar y
// bound as the second operand. Cx and Ax may alias (in-place apply).
GrB_Info GB (_bind2nd__le_uint8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    bool *Cx = (bool *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    const uint8_t y = (*((uint8_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // skip positions with no entry (bitmap case); GBB is always true
        // for full matrices
        if (GBB (Ab, k))
        {
            uint8_t aij = GBX (Ax, k, false) ;
            Cx [k] = (aij <= y) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is the per-entry kernel used by GB_unop_transpose.c:
// cij = (x <= aij), with scalar x bound first.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x <= aij) ; \
}
// C = op (x, A'): transpose A while applying the bound operator.
GrB_Info GB (_bind1st_tran__le_uint8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows this function
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is the per-entry kernel used by GB_unop_transpose.c:
// cij = (aij <= y), with scalar y bound second.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij <= y) ; \
}
// C = op (A', y): transpose A while applying the bound operator.
GrB_Info GB (_bind2nd_tran__le_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
vacation.c | /* =============================================================================
*
* vacation.c
*
* =============================================================================
*
* Copyright (C) Stanford University, 2006. All Rights Reserved.
* Author: Chi Cao Minh
*
* =============================================================================
*
* For the license of bayes/sort.h and bayes/sort.c, please see the header
* of the files.
*
* ------------------------------------------------------------------------
*
* For the license of kmeans, please see kmeans/LICENSE.kmeans
*
* ------------------------------------------------------------------------
*
* For the license of ssca2, please see ssca2/COPYRIGHT
*
* ------------------------------------------------------------------------
*
* For the license of lib/mt19937ar.c and lib/mt19937ar.h, please see the
* header of the files.
*
* ------------------------------------------------------------------------
*
* For the license of lib/rbtree.h and lib/rbtree.c, please see
* lib/LEGALNOTICE.rbtree and lib/LICENSE.rbtree
*
* ------------------------------------------------------------------------
*
* Unless otherwise noted, the following license applies to STAMP files:
*
* Copyright (c) 2007, Stanford University
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of Stanford University nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY STANFORD UNIVERSITY ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL STANFORD UNIVERSITY BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
* =============================================================================
*/
#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
#include <getopt.h>
#include "client.h"
#include "customer.h"
#include "list.h"
#include "manager.h"
#include "map.h"
#include "memory.h"
#include "operation.h"
#include "random.h"
#include "reservation.h"
#include "thread.h"
#include "timer.h"
#include "tm.h"
#include "types.h"
#include "utility.h"
enum param_types {
PARAM_CLIENTS = (unsigned char)'c',
PARAM_NUMBER = (unsigned char)'n',
PARAM_QUERIES = (unsigned char)'q',
PARAM_RELATIONS = (unsigned char)'r',
PARAM_TRANSACTIONS = (unsigned char)'t',
PARAM_USER = (unsigned char)'u',
};
#define PARAM_DEFAULT_CLIENTS (1)
#define PARAM_DEFAULT_NUMBER (10)
#define PARAM_DEFAULT_QUERIES (90)
#define PARAM_DEFAULT_RELATIONS (1 << 16)
#define PARAM_DEFAULT_TRANSACTIONS (1 << 26)
#define PARAM_DEFAULT_USER (80)
double global_params[256]; /* 256 = ascii limit */
/* =============================================================================
* displayUsage
* =============================================================================
*/
static void
displayUsage (const char* appName)
{
/* Print the option summary (one line per flag, showing its default)
 * and terminate the program with a nonzero exit status. Called from
 * parseArgs() on any argument error. */
printf("Usage: %s [options]\n", appName);
puts("\nOptions: (defaults)\n");
printf(" c <UINT> Number of [c]lients (%i)\n",
PARAM_DEFAULT_CLIENTS);
printf(" n <UINT> [n]umber of user queries/transaction (%i)\n",
PARAM_DEFAULT_NUMBER);
printf(" q <UINT> Percentage of relations [q]ueried (%i)\n",
PARAM_DEFAULT_QUERIES);
printf(" r <UINT> Number of possible [r]elations (%i)\n",
PARAM_DEFAULT_RELATIONS);
printf(" t <UINT> Number of [t]ransactions (%i)\n",
PARAM_DEFAULT_TRANSACTIONS);
printf(" u <UINT> Percentage of [u]ser transactions (%i)\n",
PARAM_DEFAULT_USER);
exit(1);
}
/* =============================================================================
* setDefaultParams
* =============================================================================
*/
static void
setDefaultParams ()
{
    /* Reset every tunable parameter to its compile-time default.
     * Table-driven so adding a parameter is a one-line change. */
    static const struct {
        int key;
        double value;
    } defaults[] = {
        { PARAM_CLIENTS,      PARAM_DEFAULT_CLIENTS      },
        { PARAM_NUMBER,       PARAM_DEFAULT_NUMBER       },
        { PARAM_QUERIES,      PARAM_DEFAULT_QUERIES      },
        { PARAM_RELATIONS,    PARAM_DEFAULT_RELATIONS    },
        { PARAM_TRANSACTIONS, PARAM_DEFAULT_TRANSACTIONS },
        { PARAM_USER,         PARAM_DEFAULT_USER         },
    };
    size_t i;
    for (i = 0; i < sizeof(defaults) / sizeof(defaults[0]); i++) {
        global_params[defaults[i].key] = defaults[i].value;
    }
}
/* =============================================================================
* parseArgs
* =============================================================================
*/
static void
parseArgs (long argc, char* const argv[])
{
/* Parse the command-line flags into global_params. Any unrecognized
 * flag or stray positional argument increments opterr and triggers
 * displayUsage(), which exits the program. */
long i;
long opt;
opterr = 0; /* suppress getopt's own diagnostics; we report errors ourselves */
setDefaultParams();
while ((opt = getopt(argc, argv, "c:n:q:r:t:u:")) != -1) {
switch (opt) {
case 'c':
case 'n':
case 'q':
case 'r':
case 't':
case 'u':
/* all flags share the same handling: store the numeric value
 * under the flag's ASCII code */
global_params[(unsigned char)opt] = atol(optarg);
break;
case '?':
default:
opterr++;
break;
}
}
/* any leftover non-option arguments are errors */
for (i = optind; i < argc; i++) {
fprintf(stderr, "Non-option argument: %s\n", argv[i]);
opterr++;
}
if (opterr) {
displayUsage(argv[0]);
}
}
/* =============================================================================
* addCustomer
* -- Wrapper function
* =============================================================================
*/
static bool_t
addCustomer (manager_t* managerPtr, long id, long num, long price)
{
/* num and price are intentionally ignored: customers carry no
 * quantity/price, but the signature must match the manager_add[]
 * function-pointer tables in initializeManager() and checkTables(). */
return manager_addCustomer_seq(managerPtr, id);
}
/* =============================================================================
* initializeManager
* =============================================================================
*/
static manager_t*
initializeManager ()
{
/* Build the manager and populate its four tables (cars, flights, rooms,
 * customers) with numRelation entries each, inserted in a randomized id
 * order with randomized num/price values. Returns the new manager. */
manager_t* managerPtr;
long i;
long numRelation;
random_t* randomPtr;
long* ids;
/* one insert function per table; addCustomer adapts the customer call
 * to the common (manager, id, num, price) signature */
bool_t (*manager_add[])( manager_t*, long, long, long) = {
&manager_addCar_seq,
&manager_addFlight_seq,
&manager_addRoom_seq,
&addCustomer
};
long t;
long numTable = sizeof(manager_add) / sizeof(manager_add[0]);
printf("Initializing manager... ");
fflush(stdout);
randomPtr = random_alloc();
assert(randomPtr != NULL);
managerPtr = manager_alloc();
assert(managerPtr != NULL);
numRelation = (long)global_params[PARAM_RELATIONS];
ids = (long*)SEQ_MALLOC(numRelation * sizeof(long));
/* ids are 1..numRelation */
for (i = 0; i < numRelation; i++) {
ids[i] = i + 1;
}
for (t = 0; t < numTable; t++) {
/* Shuffle ids */
/* numRelation random pairwise swaps (not a strict Fisher-Yates,
 * but sufficient randomization for benchmark setup) */
for (i = 0; i < numRelation; i++) {
long x = random_generate(randomPtr) % numRelation;
long y = random_generate(randomPtr) % numRelation;
long tmp = ids[x];
ids[x] = ids[y];
ids[y] = tmp;
}
/* Populate table */
for (i = 0; i < numRelation; i++) {
bool_t status;
long id = ids[i];
long num = ((random_generate(randomPtr) % 5) + 1) * 100; /* 100..500 step 100 */
long price = ((random_generate(randomPtr) % 5) * 10) + 50; /* 50..90 step 10 */
status = manager_add[t](managerPtr, id, num, price);
assert(status);
}
} /* for t */
puts("done.");
fflush(stdout);
random_free(randomPtr);
SEQ_FREE(ids);
return managerPtr;
}
/* =============================================================================
* initializeClients
* =============================================================================
*/
static client_t**
initializeClients (manager_t* managerPtr)
{
/* Allocate one client_t per configured client. The total transaction
 * count is divided evenly (rounded to nearest) across clients, and the
 * query range is percentQuery% of the relation id space.
 * Fix: removed an unused random_t (random_alloc/random_free pair) that
 * was allocated here but never read between creation and release. */
client_t** clients;
long i;
long numClient = (long)global_params[PARAM_CLIENTS];
long numTransaction = (long)global_params[PARAM_TRANSACTIONS];
long numTransactionPerClient;
long numQueryPerTransaction = (long)global_params[PARAM_NUMBER];
long numRelation = (long)global_params[PARAM_RELATIONS];
long percentQuery = (long)global_params[PARAM_QUERIES];
long queryRange;
long percentUser = (long)global_params[PARAM_USER];
printf("Initializing clients... ");
fflush(stdout);
clients = (client_t**)SEQ_MALLOC(numClient * sizeof(client_t*));
assert(clients != NULL);
/* round to nearest so all transactions are covered */
numTransactionPerClient = (long)((double)numTransaction / (double)numClient + 0.5);
queryRange = (long)((double)percentQuery / 100.0 * (double)numRelation + 0.5);
for (i = 0; i < numClient; i++) {
clients[i] = client_alloc(i,
managerPtr,
numTransactionPerClient,
numQueryPerTransaction,
queryRange,
percentUser);
assert(clients[i] != NULL);
}
puts("done.");
printf(" Transactions = %li\n", numTransaction);
printf(" Clients = %li\n", numClient);
printf(" Transactions/client = %li\n", numTransactionPerClient);
printf(" Queries/transaction = %li\n", numQueryPerTransaction);
printf(" Relations = %li\n", numRelation);
printf(" Query percent = %li\n", percentQuery);
printf(" Query range = %li\n", queryRange);
printf(" Percent user = %li\n", percentUser);
fflush(stdout);
return clients;
}
/* =============================================================================
* checkTables
* -- some simple checks (not comprehensive)
* -- dependent on tasks generated for clients in initializeClients()
* =============================================================================
*/
static void
checkTables (manager_t* managerPtr)
{
/* Sanity checks after the benchmark run: verifies customer ids in the
 * expected range are unique (a removed id must no longer be findable)
 * and that each reservation table entry validates and is unique (a
 * second MAP_REMOVE of the same id must fail). Note: the checks also
 * remove the entries they visit, so the tables are consumed here. */
long i;
long numRelation = (long)global_params[PARAM_RELATIONS];
MAP_T* customerTablePtr = managerPtr->customerTablePtr;
MAP_T* tables[] = {
managerPtr->carTablePtr,
managerPtr->flightTablePtr,
managerPtr->roomTablePtr,
};
long numTable = sizeof(tables) / sizeof(tables[0]);
bool_t (*manager_add[])(manager_t*, long, long, long) = {
&manager_addCar_seq,
&manager_addFlight_seq,
&manager_addRoom_seq
};
long t;
printf("Checking tables... ");
fflush(stdout);
/* Check for unique customer IDs */
/* mirrors the queryRange computation in initializeClients() */
long percentQuery = (long)global_params[PARAM_QUERIES];
long queryRange = (long)((double)percentQuery / 100.0 * (double)numRelation + 0.5);
long maxCustomerId = queryRange + 1;
for (i = 1; i <= maxCustomerId; i++) {
if (MAP_FIND(customerTablePtr, i)) {
if (MAP_REMOVE(customerTablePtr, i)) {
assert(!MAP_FIND(customerTablePtr, i));
}
}
}
/* Check reservation tables for consistency and unique ids */
for (t = 0; t < numTable; t++) {
MAP_T* tablePtr = tables[t];
for (i = 1; i <= numRelation; i++) {
if (MAP_FIND(tablePtr, i)) {
assert(manager_add[t](managerPtr, i, 0, 0)); /* validate entry */
if (MAP_REMOVE(tablePtr, i)) {
/* removing again must fail: id was unique */
assert(!MAP_REMOVE(tablePtr, i));
}
}
}
}
puts("done.");
fflush(stdout);
}
/* =============================================================================
* freeClients
* =============================================================================
*/
static void
freeClients (client_t** clients)
{
    /* Release every client created by initializeClients(). The client
     * count is read back from the same global parameter used there. */
    long numClient = (long)global_params[PARAM_CLIENTS];
    long idx;
    for (idx = 0; idx < numClient; idx++) {
        client_free(clients[idx]);
    }
}
/* =============================================================================
* main
* =============================================================================
*/
MAIN(argc, argv)
{
/* Benchmark driver: parse args, build the manager and clients, run all
 * client transactions (timed), validate the tables, then tear down.
 * MAIN/MAIN_RETURN and the TM_*/GOTO_* macros come from tm.h and expand
 * per the selected transactional-memory backend. */
manager_t* managerPtr;
client_t** clients;
TIMER_T start;
TIMER_T stop;
/* Initialization */
parseArgs(argc, (char** const)argv);
SIM_GET_NUM_CPU(global_params[PARAM_CLIENTS]);
managerPtr = initializeManager();
assert(managerPtr != NULL);
clients = initializeClients(managerPtr);
assert(clients != NULL);
long numThread = global_params[PARAM_CLIENTS];
TM_STARTUP(numThread);
P_MEMORY_STARTUP(numThread);
thread_startup(numThread);
/* Run transactions */
printf("Running clients... ");
fflush(stdout);
GOTO_SIM();
TIMER_READ(start);
#ifdef OTM
#pragma omp parallel
{
client_run(clients);
}
#else
thread_start(client_run, (void*)clients);
#endif
TIMER_READ(stop);
GOTO_REAL();
puts("done.");
printf("Time = %0.6lf\n",
TIMER_DIFF_SECONDS(start, stop));
fflush(stdout);
checkTables(managerPtr);
/* Clean up */
printf("Deallocating memory... ");
fflush(stdout);
freeClients(clients);
/*
 * TODO: The contents of the manager's table need to be deallocated.
 */
manager_free(managerPtr);
puts("done.");
fflush(stdout);
TM_SHUTDOWN();
P_MEMORY_SHUTDOWN();
thread_shutdown();
MAIN_RETURN(0);
}
/* =============================================================================
*
* End of vacation.c
*
* =============================================================================
*/
|
cancel_worksharing.c | // RUN: %libomp-compile && env OMP_CANCELLATION=true %libomp-run | %sort-threads | FileCheck %s
// REQUIRES: ompt
// Current GOMP interface implementation does not support cancellation; icc 16 does not distinguish between sections and loops
// XFAIL: gcc-4, icc-16
#include "callback.h"
#include <unistd.h>
int main()
{
/* OMPT cancellation test: two parallel regions, one cancelling a
 * worksharing loop and one cancelling sections. The thread that hits
 * "cancel" activates cancellation; the other thread observes it at its
 * "cancellation point". The // CHECK lines below are FileCheck
 * directives matched against the runtime's callback trace — do not
 * reword them. */
int condition=0;
#pragma omp parallel num_threads(2)
{
int x = 0;
int i;
#pragma omp for
for(i = 0; i < 2; i++)
{
if(i == 0)
{
/* iteration 0: activate loop cancellation first */
x++;
OMPT_SIGNAL(condition);
#pragma omp cancel for
}
else
{
/* iteration 1: wait until cancellation is activated, then
 * reach the cancellation point */
x++;
OMPT_WAIT(condition,1);
delay(10000);
#pragma omp cancellation point for
}
}
}
#pragma omp parallel num_threads(2)
{
#pragma omp sections
{
#pragma omp section
{
/* first section activates sections cancellation */
OMPT_SIGNAL(condition);
#pragma omp cancel sections
}
#pragma omp section
{
/* second section detects it at its cancellation point */
OMPT_WAIT(condition,2);
delay(10000);
#pragma omp cancellation point sections
}
}
}
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_implicit_task'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_cancel'
// CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]]
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_initial_task_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, actual_parallelism=1, index=1, flags=1
// cancel for and sections
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_cancel: task_data=[[TASK_ID:[0-9]+]], flags=ompt_cancel_loop|ompt_cancel_activated=20, codeptr_ra={{0x[0-f]*}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_cancel: task_data=[[TASK_ID:[0-9]+]], flags=ompt_cancel_sections|ompt_cancel_{{activated=18|detected=34}}, codeptr_ra={{0x[0-f]*}}
// CHECK: {{^}}[[OTHER_THREAD_ID:[0-9]+]]: ompt_event_cancel: task_data=[[TASK_ID:[0-9]+]], flags=ompt_cancel_loop|ompt_cancel_detected=36, codeptr_ra={{0x[0-f]*}}
// CHECK: {{^}}[[OTHER_THREAD_ID:[0-9]+]]: ompt_event_cancel: task_data=[[TASK_ID:[0-9]+]], flags=ompt_cancel_sections|ompt_cancel_{{activated=18|detected=34}}, codeptr_ra={{0x[0-f]*}}
return 0;
}
|
matrixstrassen.h | /**
* @file matrixstrassen.h matrix strassen operations.
* @author TPOC: palisade@njit.edu
*
* @copyright Copyright (c) 2017, New Jersey Institute of Technology (NJIT)
* All rights reserved.
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this
* list of conditions and the following disclaimer in the documentation and/or other
* materials provided with the distribution.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef LBCRYPTO_MATH_MATRIXSTRASSEN_H
#define LBCRYPTO_MATH_MATRIXSTRASSEN_H
#include "matrix.h"
namespace lbcrypto {
template<class Element>
class MatrixStrassen : public Serializable {
public:
typedef vector<vector<unique_ptr<Element>>> data_t;
typedef vector<unique_ptr<Element>> lineardata_t;
typedef typename vector<unique_ptr<Element>>::iterator it_lineardata_t;
typedef std::function<unique_ptr<Element>(void)> alloc_func;
/**
* Constructor that initializes matrix values using a zero allocator
*
* @param &allocZero lambda function for zero initialization.
* @param &rows number of rows.
* @param &rows number of columns.
*/
MatrixStrassen(alloc_func allocZero, size_t rows, size_t cols) : data(), rows(rows), cols(cols), allocZero(allocZero) {
    // Build a rows x cols grid where every cell is produced by allocZero().
    data.resize(rows);
    for (auto& row : data) {
        row.reserve(cols);
        for (size_t c = 0; c < cols; ++c) {
            row.push_back(allocZero());
        }
    }
}
/**
* Constructor that initializes matrix values using a distribution generation allocator
*
* @param &allocZero lambda function for zero initialization (used for initializing derived matrix objects)
* @param &rows number of rows.
* @param &rows number of columns.
* @param &allocGen lambda function for intialization using a distribution generator.
*/
MatrixStrassen(alloc_func allocZero, size_t rows, size_t cols, alloc_func allocGen);
/**
* Constructor of an empty matrix; SetSize must be called on this matrix to use it
* Basically this exists to support deserializing
*
* @param &allocZero lambda function for zero initialization.
*/
MatrixStrassen(alloc_func allocZero) : data(), rows(0), cols(0), allocZero(allocZero) {}
void SetSize(size_t rows, size_t cols) {
    // Only a matrix built with the empty (deserialization) constructor may
    // be sized; resizing a populated matrix is not supported.
    if (this->rows != 0 || this->cols != 0)
        throw std::logic_error("You cannot SetSize on a non-empty matrix");
    this->rows = rows;
    this->cols = cols;
    // Fill the rows x cols grid with zero-initialized cells.
    data.resize(rows);
    for (auto& row : data) {
        row.reserve(cols);
        for (size_t c = 0; c < cols; ++c) {
            row.push_back(allocZero());
        }
    }
}
/**
* Copy constructor
*
* @param &other the matrix object to be copied
*/
MatrixStrassen(const MatrixStrassen<Element>& other) : data(), rows(other.rows), cols(other.cols), allocZero(other.allocZero) {
// cells are unique_ptrs, so delegate to deepCopyData to clone each
// element rather than sharing pointers (defined elsewhere in this class)
deepCopyData(other.data);
}
/**
* Assignment operator
*
* @param &other the matrix object whose values are to be copied
* @return the resulting matrix
*/
inline MatrixStrassen<Element>& operator=(const MatrixStrassen<Element>& other);
/**
* In-place change of the current matrix to a matrix of all ones
*
* @return the resulting matrix
*/
inline MatrixStrassen<Element>& Ones();
/**
* Fill matrix using the same element
*
* @param &val the element the matrix is filled by
*
* @return the resulting matrix
*/
inline MatrixStrassen<Element>& Fill(const Element &val);
/**
* In-place change of the current matrix to Identity matrix
*
* @return the resulting matrix
*/
inline MatrixStrassen<Element>& Identity();
/**
* Sets the first row to be powers of two
*
* @return the resulting matrix
*/
inline MatrixStrassen<Element> GadgetVector(int32_t base = 2) const;
/**
* Computes the infinity norm
*
* @return the norm in double format
*/
inline double Norm() const;
/**
* Operator for matrix multiplication
*
* @param &other the multiplier matrix
* @return the result of multiplication
*/
inline MatrixStrassen<Element> operator*(MatrixStrassen<Element> const& other) const {
return Mult(other); // thin forwarder to the named Mult() implementation
}
/**
* Multiplication of matrix by a scalar
*
* @param &other the multiplier element
* @return the result of multiplication
*/
inline MatrixStrassen<Element> ScalarMult(Element const& other) const {
    // Multiply every element of a copy of this matrix by the scalar.
    // Fixes: the loop bounds compared a signed int32_t against the
    // size_t members rows/cols (signed/unsigned mismatch); the loops
    // also iterated columns in the outer loop, striding across the
    // row-major data[row][col] storage. Rows are now the outer
    // (parallel) loop, matching the memory layout.
    MatrixStrassen<Element> result(*this);
#pragma omp parallel for
    for (int32_t row = 0; row < static_cast<int32_t>(result.rows); ++row) {
        for (int32_t col = 0; col < static_cast<int32_t>(result.cols); ++col) {
            *result.data[row][col] = *result.data[row][col] * other;
        }
    }
    return result;
}
/**
* Operator for scalar multiplication
*
* @param &other the multiplier element
* @return the result of multiplication
*/
inline MatrixStrassen<Element> operator*(Element const& other) const {
return ScalarMult(other); // element-wise right scalar multiplication
}
/**
* Equality check
*
* @param &other the matrix object to compare to
* @return the boolean result
*/
inline bool Equal(MatrixStrassen<Element> const& other) const {
    // Shape mismatch means the matrices cannot be equal.
    const bool sameShape = (rows == other.rows) && (cols == other.cols);
    if (!sameShape)
        return false;
    // Element-wise comparison; bail out on the first difference.
    for (size_t r = 0; r < rows; ++r)
        for (size_t c = 0; c < cols; ++c)
            if (*data[r][c] != *other.data[r][c])
                return false;
    return true;
}
/**
* Operator for equality check
*
* @param &other the matrix object to compare to
* @return the boolean result
*/
inline bool operator==(MatrixStrassen<Element> const& other) const {
return Equal(other); // dimension check followed by element-wise comparison
}
/**
 * Operator for non-equality check
 *
 * @param &other the matrix object to compare to
 * @return the boolean result
 */
inline bool operator!=(MatrixStrassen<Element> const& other) const {
return !Equal(other); // logical negation of Equal
}
/**
* Get property to access the data as a vector of vectors
*
* @return the data as vector of vectors
*/
const data_t& GetData() const {
// NOTE: exposes a reference to the internal owning storage; the reference
// must not outlive this matrix
return data;
}
/**
 * Get property to access the number of rows in the matrix
 *
 * @return the number of rows
 */
size_t GetRows() const {
return rows;
}
/**
 * Get property to access the number of columns in the matrix
 *
 * @return the number of columns
 */
size_t GetCols() const {
return cols;
}
/**
 * Get property to access the zero allocator for the matrix
 *
 * @return the lambda function corresponding to the element zero allocator
 */
alloc_func GetAllocator() const {
return allocZero;
}
/**
* Sets the evaluation or coefficient representation for all ring elements that support the SetFormat method
*
* @param &format the enum value corresponding to coefficient or evaluation representation
*/
void SetFormat(Format format);
/**
* MatrixStrassen addition
*
* @param &other the matrix to be added
* @return the resulting matrix
*/
inline MatrixStrassen<Element> Add(MatrixStrassen<Element> const& other) const {
    if (rows != other.rows || cols != other.cols) {
        // std::-qualified so the code no longer relies on a using-directive
        throw std::invalid_argument("Addition operands have incompatible dimensions");
    }
    MatrixStrassen<Element> result(*this);
    // int64_t counters: the previous int32_t counters were compared against the
    // size_t dimensions (implicit signed/unsigned conversion, truncation for very
    // large matrices). Signed type kept for OpenMP canonical-loop compatibility.
    const int64_t nc = static_cast<int64_t>(cols);
    const int64_t nr = static_cast<int64_t>(rows);
    #pragma omp parallel for
    for (int64_t j = 0; j < nc; ++j) {
        for (int64_t i = 0; i < nr; ++i) {
            *result.data[i][j] += *other.data[i][j];
        }
    }
    return result;
}
/**
* Operator for matrix addition
*
* @param &other the matrix to be added
* @return the resulting matrix
*/
inline MatrixStrassen<Element> operator+(MatrixStrassen<Element> const& other) const {
return this->Add(other); // Add validates that dimensions match
}
/**
* Operator for in-place addition
*
* @param &other the matrix to be added
* @return the resulting matrix (same object)
*/
inline MatrixStrassen<Element>& operator+=(MatrixStrassen<Element> const& other);
/**
* MatrixStrassen substraction
*
* @param &other the matrix to be substracted
* @return the resulting matrix
*/
inline MatrixStrassen<Element> Sub(MatrixStrassen<Element> const& other) const {
    if (rows != other.rows || cols != other.cols) {
        // std::-qualified so the code no longer relies on a using-directive
        throw std::invalid_argument("Subtraction operands have incompatible dimensions");
    }
    MatrixStrassen<Element> result(allocZero, rows, other.cols);
    // int64_t counters: the previous int32_t counters were compared against the
    // size_t dimensions (implicit signed/unsigned conversion, truncation for very
    // large matrices). Signed type kept for OpenMP canonical-loop compatibility.
    const int64_t nc = static_cast<int64_t>(cols);
    const int64_t nr = static_cast<int64_t>(rows);
    #pragma omp parallel for
    for (int64_t j = 0; j < nc; ++j) {
        for (int64_t i = 0; i < nr; ++i) {
            *result.data[i][j] = *data[i][j] - *other.data[i][j];
        }
    }
    return result;
}
/**
* Operator for matrix substraction
*
* @param &other the matrix to be substracted
* @return the resulting matrix
*/
inline MatrixStrassen<Element> operator-(MatrixStrassen<Element> const& other) const {
return this->Sub(other); // Sub validates that dimensions match
}
/**
* Operator for in-place matrix substraction
*
* @param &other the matrix to be substracted
* @return the resulting matrix (same object)
*/
inline MatrixStrassen<Element>& operator-=(MatrixStrassen<Element> const& other);
/**
* MatrixStrassen transposition
*
* @return the resulting matrix
*/
inline MatrixStrassen<Element> Transpose() const;
// YSP The signature of this method needs to be changed in the future
/**
* MatrixStrassen determinant - found using Laplace formula with complexity O(d!), where d is the dimension
*
* @param *result where the result is stored
*/
inline void Determinant(Element *result) const;
//inline Element Determinant() const;
/**
* Cofactor matrix - the matrix of determinants of the minors A_{ij} multiplied by -1^{i+j}
*
* @return the cofactor matrix for the given matrix
*/
inline MatrixStrassen<Element> CofactorMatrixStrassen() const;
/**
* Add rows to bottom of the matrix
*
* @param &other the matrix to be added to the bottom of current matrix
* @return the resulting matrix
*/
inline MatrixStrassen<Element>& VStack(MatrixStrassen<Element> const& other);
/**
* Add columns the right of the matrix
*
* @param &other the matrix to be added to the right of current matrix
* @return the resulting matrix
*/
inline MatrixStrassen<Element>& HStack(MatrixStrassen<Element> const& other);
/**
* MatrixStrassen indexing operator - writeable instance of the element
*
* @param &row row index
* @param &col column index
* @return the element at the index
*/
inline Element& operator()(size_t row, size_t col) {
return *data[row][col]; // no bounds checking - caller must supply valid indices
}
/**
 * MatrixStrassen indexing operator - read-only instance of the element
 *
 * @param &row row index
 * @param &col column index
 * @return the element at the index
 */
inline Element const& operator()(size_t row, size_t col) const {
return *data[row][col]; // no bounds checking - caller must supply valid indices
}
/**
* MatrixStrassen row extractor
*
* @param &row row index
* @return the row at the index
*/
// Extracts a single row as a 1 x cols matrix of element copies.
// Now throws std::out_of_range on a bad index (previously undefined behavior),
// uses a size_t index (was a narrowing int), and drops dead commented-out code.
inline MatrixStrassen<Element> ExtractRow(size_t row) const {
    if (row >= this->rows) {
        throw std::out_of_range("ExtractRow: row index out of range");
    }
    MatrixStrassen<Element> result(this->allocZero, 1, this->cols);
    size_t col = 0;
    // data stores owning pointers, so dereference each entry to copy the element
    for (const auto& entry : this->GetData()[row]) {
        result(0, col) = *entry;
        ++col;
    }
    return result;
}
/**
* Call switch format for each (ring) element
*
*/
inline void SwitchFormat();
/**
* MatrixStrassen multiplication
*
* @param &other the multiplier matrix
* @return the result of multiplication
*/
MatrixStrassen<Element> Mult(const MatrixStrassen<Element>& other, int nrec=0, int pad = -1) const;
/*
* Multiply the matrix by a vector whose elements are all 1's. This causes the elements of each
* row of the matrix to be added and placed into the corresponding position in the output vector.
*/
MatrixStrassen<Element> MultByUnityVector() const;
/*
* Multiply the matrix by a vector of random 1's and 0's, which is the same as adding select
* elements in each row together.
* Return a vector that is a rows x 1 matrix.
*/
MatrixStrassen<Element> MultByRandomVector(std::vector<int> ranvec) const;
/**
* Serialize the object into a Serialized
* @param serObj is used to store the serialized result. It MUST be a rapidjson Object (SetObject());
* @return true if successfully serialized
*/
bool Serialize(Serialized* serObj) const;
/**
* Populate the object from the deserialization of the Serialized
* @param serObj contains the serialized object
* @return true on success
*/
bool Deserialize(const Serialized& serObj);
private:
struct MatDescriptor {
int lda;
int nrec;
int nproc;
int nprocr;
int nprocc;
int nproc_summa;
int bs;
};
const int DESC_SIZE = 7; // number of ints that make up a MatDescriptor
const int rank=0, base=0;
mutable data_t data;
size_t rows;
mutable int rowpad = 0;
size_t cols;
mutable int colpad = 0;
alloc_func allocZero;
mutable char *pattern = NULL;
mutable int numAdd = 0;
mutable int numMult = 0;
mutable int numSub = 0;
mutable MatDescriptor desc;
mutable unique_ptr<Element> zeroUniquePtr = allocZero();
mutable int NUM_THREADS = 1;
void multiplyInternalCAPS( it_lineardata_t A, it_lineardata_t B, it_lineardata_t C, MatDescriptor desc, it_lineardata_t work ) const;
void strassenDFSCAPS( it_lineardata_t A, it_lineardata_t B, it_lineardata_t C, MatDescriptor desc, it_lineardata_t workPassThrough ) const;
void block_multiplyCAPS( it_lineardata_t A, it_lineardata_t B, it_lineardata_t C, MatDescriptor d, it_lineardata_t workPassThrough ) const;
void LinearizeDataCAPS(lineardata_t *lineardataPtr) const;
void UnlinearizeDataCAPS(lineardata_t *lineardataPtr) const;
int getRank() const;
void verifyDescriptor( MatDescriptor desc );
long long numEntriesPerProc( MatDescriptor desc ) const;
//deep copy of data - used for copy constructor
void deepCopyData(data_t const& src);
void getData(const data_t &Adata, const data_t &Bdata, const data_t &Cdata, int row, int inner, int col) const;
void accessUniquePtrCAPS(it_lineardata_t ptr, Element val) const;
void smartSubtractionCAPS(it_lineardata_t result, it_lineardata_t A, it_lineardata_t B) const;
void smartAdditionCAPS(it_lineardata_t result, it_lineardata_t A, it_lineardata_t B) const;
void addMatricesCAPS( int numEntries, it_lineardata_t C, it_lineardata_t A, it_lineardata_t B ) const;
void addSubMatricesCAPS(int numEntries, it_lineardata_t T1, it_lineardata_t S11, it_lineardata_t S12, it_lineardata_t T2,
it_lineardata_t S21, it_lineardata_t S22 ) const;
void subMatricesCAPS( int numEntries, it_lineardata_t C, it_lineardata_t A, it_lineardata_t B ) const;
void tripleAddMatricesCAPS(int numEntries, it_lineardata_t T1, it_lineardata_t S11, it_lineardata_t S12, it_lineardata_t T2,
it_lineardata_t S21, it_lineardata_t S22, it_lineardata_t T3, it_lineardata_t S31, it_lineardata_t S32) const;
void tripleSubMatricesCAPS(int numEntries, it_lineardata_t T1, it_lineardata_t S11, it_lineardata_t S12, it_lineardata_t T2,
it_lineardata_t S21, it_lineardata_t S22, it_lineardata_t T3, it_lineardata_t S31, it_lineardata_t S32) const ;
void distributeFrom1ProcCAPS( MatDescriptor desc, it_lineardata_t O, it_lineardata_t I ) const;
void collectTo1ProcCAPS( MatDescriptor desc, it_lineardata_t O, it_lineardata_t I ) const;
void sendBlockCAPS( int rank, int target, it_lineardata_t O, int bs, int source, it_lineardata_t I, int ldi ) const;
void receiveBlockCAPS( int rank, int target, it_lineardata_t O, int bs, int source, it_lineardata_t I, int ldo ) const;
void distributeFrom1ProcRecCAPS( MatDescriptor desc, it_lineardata_t O, it_lineardata_t I, int ldi ) const;
void collectTo1ProcRecCAPS( MatDescriptor desc, it_lineardata_t O, it_lineardata_t I, int ldo ) const;
};
/**
* Operator for scalar multiplication of matrix
*
* @param &e element
* @param &M matrix
* @return the resulting matrix
*/
template<class Element>
inline MatrixStrassen<Element> operator*(Element const& e, MatrixStrassen<Element> const& M) {
return M.ScalarMult(e); // left scalar multiply delegates to ScalarMult (assumes Element multiplication is commutative - TODO confirm)
}
/**
* Generates a matrix of rotations. See pages 7-8 of https://eprint.iacr.org/2013/297
*
* @param &inMat the matrix of power-of-2 cyclotomic ring elements to be rotated
* @return the resulting matrix of big binary integers
*/
inline MatrixStrassen<BigInteger> Rotate(MatrixStrassen<Poly> const& inMat);
/**
* Each element becomes a square matrix with columns of that element's
* rotations in coefficient form. See pages 7-8 of https://eprint.iacr.org/2013/297
*
* @param &inMat the matrix of power-of-2 cyclotomic ring elements to be rotated
* @return the resulting matrix of big binary integers
*/
inline MatrixStrassen<BigVector> RotateVecResult(MatrixStrassen<Poly> const& inMat);
/**
* Stream output operator
*
* @param &os stream
* @param &m matrix to be outputted
* @return the chained stream
*/
template<class Element>
inline std::ostream& operator<<(std::ostream& os, const MatrixStrassen<Element>& m);
/**
* Gives the Choleshky decomposition of the input matrix.
* The assumption is that covariance matrix does not have large coefficients because it is formed by
* discrete gaussians e and s; this implies int32_t can be used
* This algorithm can be further improved - see the Darmstadt paper section 4.4
* http://eprint.iacr.org/2013/297.pdf
*
* @param &input the matrix for which the Cholesky decomposition is to be computed
* @return the resulting matrix of floating-point numbers
*/
inline MatrixStrassen<double> Cholesky(const MatrixStrassen<int32_t> &input);
/**
* Convert a matrix of integers from BigInteger to int32_t
* Convert from Z_q to [-q/2, q/2]
*
* @param &input the input matrix
* @param &modulus the ring modulus
* @return the resulting matrix of int32_t
*/
inline MatrixStrassen<int32_t> ConvertToInt32(const MatrixStrassen<BigInteger> &input, const BigInteger& modulus);
/**
* Convert a matrix of BigVector to int32_t
* Convert from Z_q to [-q/2, q/2]
*
* @param &input the input matrix
* @param &modulus the ring modulus
* @return the resulting matrix of int32_t
*/
inline MatrixStrassen<int32_t> ConvertToInt32(const MatrixStrassen<BigVector> &input, const BigInteger& modulus);
/**
* Split a vector of int32_t into a vector of ring elements with ring dimension n
*
* @param &other the input matrix
* @param &n the ring dimension
* @param ¶ms Poly element params
* @return the resulting matrix of Poly
*/
inline MatrixStrassen<Poly> SplitInt32IntoPolyElements(MatrixStrassen<int32_t> const& other, size_t n, const shared_ptr<ILParams> params);
/**
* Another method for splitting a vector of int32_t into a vector of ring elements with ring dimension n
*
* @param &other the input matrix
* @param &n the ring dimension
* @param ¶ms Poly element params
* @return the resulting matrix of Poly
*/
inline MatrixStrassen<Poly> SplitInt32AltIntoPolyElements(MatrixStrassen<int32_t> const& other, size_t n, const shared_ptr<ILParams> params);
}
#endif // LBCRYPTO_MATH_MATRIXSTRASSEN_H
|
GB_AxB_rowscale_template.c | //------------------------------------------------------------------------------
// GB_AxB_rowscale_template: C=D*B where D is a square diagonal matrix
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// This template is not used If C is iso, since all that is needed is to create
// C as a shallow-copy of the pattern of A.
// B and C can be jumbled. D cannot, but it is a diagonal matrix so it is
// never jumbled.
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
// Bx is unused if the operator is FIRST or PAIR
#include "GB_unused.h"
ASSERT (GB_JUMBLED_OK (C)) ;
ASSERT (!GB_JUMBLED (D)) ;
ASSERT (GB_JUMBLED_OK (B)) ;
ASSERT (!C->iso) ;
//--------------------------------------------------------------------------
// get D and B
//--------------------------------------------------------------------------
// pattern-only operands carry no values, so the value arrays stay NULL
const GB_ATYPE *restrict Dx = (GB_ATYPE *) (D_is_pattern ? NULL : D->x) ;
const bool D_iso = D->iso ;
const GB_BTYPE *restrict Bx = (GB_BTYPE *) (B_is_pattern ? NULL : B->x) ;
const bool B_iso = B->iso ;
const int64_t *restrict Bi = B->i ;
const int64_t bnz = GB_nnz (B) ;
const int64_t bvlen = B->vlen ;
//--------------------------------------------------------------------------
// C=D*B
//--------------------------------------------------------------------------
// never create more tasks than there are entries in B
int ntasks = nthreads ;
ntasks = GB_IMIN (bnz, ntasks) ;
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (tid = 0 ; tid < ntasks ; tid++)
{
// each task handles one contiguous slice [pstart, pend) of B's entries
int64_t pstart, pend ;
GB_PARTITION (pstart, pend, bnz, tid, ntasks) ;
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = pstart ; p < pend ; p++)
{
int64_t i = GBI (Bi, p, bvlen) ; // get row index of B(i,j)
GB_GETA (dii, Dx, i, D_iso) ; // dii = D(i,i)
GB_GETB (bij, Bx, p, B_iso) ; // bij = B(i,j)
GB_BINOP (GB_CX (p), dii, bij, 0, 0) ; // C(i,j) = dii*bij
}
}
}
|
master-combined-1.c | void bar (int *);
// Compiler testcase exercising the OpenMP "master taskloop" family of combined
// constructs with explicit data-sharing clauses (default(none) forces every
// referenced variable to have an explicitly- or predetermined sharing class).
// NOTE(review): the pragmas themselves are the subject under test - do not
// reformat or "simplify" them.
void
foo (int *a)
{
int i, j, k, u = 0, v = 0, w = 0, x = 0, y = 0, z = 0;
// combined parallel+master with a private variable
#pragma omp parallel master default(none) private (k)
bar (&k);
// separate parallel region hosting the master-taskloop combined constructs
#pragma omp parallel default(none) firstprivate(a) shared(x, y, z)
{
#pragma omp master taskloop reduction (+:x) default(none) firstprivate(a)
for (i = 0; i < 64; i++)
x += a[i];
#pragma omp master taskloop simd reduction (+:y) default(none) firstprivate(a) private (i)
for (i = 0; i < 64; i++)
y += a[i];
#pragma omp master taskloop simd collapse(2) reduction (+:z) default(none) firstprivate(a) private (i, j)
for (j = 0; j < 1; j++)
for (i = 0; i < 64; ++i)
z += a[i];
}
// fully combined parallel master taskloop variants
#pragma omp parallel master taskloop reduction (+:u) default(none) firstprivate(a)
for (i = 0; i < 64; i++)
u += a[i];
#pragma omp parallel master taskloop simd reduction (+:v) default(none) firstprivate(a)
for (i = 0; i < 64; i++)
v += a[i];
#pragma omp parallel master taskloop simd collapse(2) reduction (+:w) default(none) firstprivate(a)
for (j = 0; j < 1; j++)
for (i = 0; i < 64; ++i)
w += a[i];
}
|
floyd_warshall.c | // Implementation based on
// http://www.cs.virginia.edu/~pact2006/program/pact2006/pact139_han4.pdf
// mixed with BLIS macro kernel.
// New features:
// - Only lower triangle of blocks is considered.
// - Next diagonal block is scheduled as soon as possible, since it is the slowest (real F-W algorithm)
// - Last diagonal block is generally smaller to handle arbitrary size matrices.
#include <omp.h>
#include <stdbool.h>
#include <stdio.h>
// tiles
#define NL1 128
// subtiles
//#define NL2 32
// BLIS macro kernel uses large tiles
#define MC NL1
#define NC NL1
#define KC NL1
// generalized inner product
// new reduction operation
#define min(x,y) (((x)<(y)) ? (x) : (y))
#define asmmin minpd
// zero element of the semiring
// e.g. infinity for min and plus/times
// 0 for plus and times
#define MAX 1000
// unit element for reduction
#define UNIT 1.0
// new pointwise operation
#define add(x,y) ((x)*(y))
#define asmadd mulpd
// redefine these to get other generalized inner products
// for assembly, provide SSE instructions
// avx versions (prefixed with v) are automatically created in AVX kernel
// right now only AVX kernel
#include "kernel_BLIS_avx.c"
// diagonal block (shared)
static double _diag[NL1*NL1] __attribute__ ((aligned (32)));
// next diagonal block
static double _diag2[NL1*NL1] __attribute__ ((aligned (32)));
// horizontal blocks, also used by macro kernel
static double _A[NL1*NL1] __attribute__ ((aligned (32)));
// vertical blocks, also used by macro kernel
static double _B[NL1*NL1] __attribute__ ((aligned (32)));
// temporary matrix for macro kernel
static double _C[MR*NR] __attribute__ ((aligned (32)));
#pragma omp threadprivate(_A, _B, _C)
// BLIS macro kernel
#include "macro.c"
/*
static void show(const double *M, int rows, int cols, int n)
{
int i, j;
for (i=0; i<rows;i++)
{
for (j=0; j<cols;j++)
printf("%.2f ", M[i*n+j]);
printf("\n");
}
printf("\n");
}
*/
//
// versions of Floyd-Warshall on up to three matrices
//
static void diagonal(double *diag, int n)
{
    /* Floyd-Warshall update of an n x n diagonal block under the generalized
     * semiring: diag[r][c] = min(diag[r][c], add(diag[r][p], diag[p][c])).
     * FIXME update lower triangle, then copy results */
    int p, r, c;
    for (p = 0; p < n; p++)
        for (r = 0; r < n; r++)
        {
            double via = diag[r*n+p];
            for (c = 0; c < n; c++)
            {
                double cur = diag[r*n+c];
                double cand = add(via, diag[p*n+c]);
                diag[r*n+c] = cur < cand ? cur : cand;
            }
        }
}
/*
static void fwCc(double *C, int n)
{
// n = 32 ??
// 8 registers for C[k,:] - read only
// vbroadcastsd C[i*n+k] - 9th
// two at a time:
// - load C[i, j] (13, 14)
// - add (11, 12)
// - min (13, 14)
// - store
int i, j, k;
for (k=0;k<n;k++)
for (i=0;i<n;i++)
{
double c = C[i*n+k];
for (j=0;j<n;j++)
C[i*n+j] = min(C[i*n+j], add(c, C[k*n+j]));
}
}
*/
static void horizontal(const double *diag, double *horz, int rows, int n)
{
    /* Update a block sharing the row of the current diagonal block:
     * horz[r][c] = min(horz[r][c], add(diag[r][p], horz[p][c])).
     * The block (and thus the diagonal) may have fewer than NL1 rows. */
    int p, r, c;
    for (p = 0; p < rows; p++)
        for (r = 0; r < rows; r++)
        {
            double via = diag[r*rows+p];
            for (c = 0; c < n; c++)
            {
                double cur = horz[r*n+c];
                double cand = add(via, horz[p*n+c]);
                horz[r*n+c] = cur < cand ? cur : cand;
            }
        }
}
static void vertical(const double *diag, double *vert, int rows, int n)
{
    /* Update a block sharing the column of the current diagonal block:
     * vert[r][c] = min(vert[r][c], add(vert[r][p], diag[p][c])).
     * The vertical block may have fewer than NL1 rows. */
    int p, r, c;
    for (p = 0; p < n; p++)
        for (r = 0; r < rows; r++)
        {
            double via = vert[r*n+p];
            for (c = 0; c < n; c++)
            {
                double cur = vert[r*n+c];
                double cand = add(via, diag[p*n+c]);
                vert[r*n+c] = cur < cand ? cur : cand;
            }
        }
}
/*
// C <- min(C, A+B) - tropical matrix multiplication
// B packed by columns
void fwABC(const double *A, const double *B, double *C, int n, int full_n)
{
for (int i=0;i<n;i++)
for (int j=0;j<n;j++)
{
double c = C[i*full_n+j];
for (int k=0;k<n;k++)
if (c > A[i*n+k]+B[j*n+k])
c = A[i*n+k]+B[j*n+k];
C[i*full_n+j] = c;
}
}
*/
// move a block into contiguous memory
/* Gather a rows x cols block out of an n-column row-major matrix into a
 * contiguous buffer (row-major within the block). */
static void pack(double* dst, const double* src, int rows, int cols, int n)
{
    int r;
    for (r = 0; r < rows; r++)
    {
        int c;
        for (c = 0; c < cols; c++)
            dst[r*cols + c] = src[r*n + c];
    }
}
/*
// move a block into contiguous memory column first
void pack_col(double* _M,const double* M, int n)
{
for (int j=0;j<NL1;j++)
for (int i=0;i<NL1;i++)
_M[j*NL1+i] = M[i*n+j];
}
*/
// move a block back to its place
/* Scatter a contiguous rows x cols block buffer back into its position in an
 * n-column row-major matrix (inverse of pack). */
static void unpack(const double* src, double* dst, int rows, int cols, int n)
{
    int r;
    for (r = 0; r < rows; r++)
    {
        int c;
        for (c = 0; c < cols; c++)
            dst[r*n + c] = src[r*cols + c];
    }
}
// Blocked, task-parallel Floyd-Warshall over an n x n row-major matrix d.
// Tiles of NL1 x NL1 are processed in the classic FW block order: for each
// pivot block k, update the pivot's row and column blocks, then all remaining
// blocks via the BLIS-style macro kernel. Only the lower triangle of blocks is
// computed (matrix assumed symmetric); the upper triangle is mirrored at the end.
// NOTE(review): the parallel region uses default(none) but reads m, n, small
// and the pack_A/pack_B/dgemm helpers' globals without listing them - this
// relies on compiler/runtime predetermined-sharing behavior; confirm it builds
// with the project's compiler.
void fw(double *d, const int n)
{
const int m = (n+NL1-1)/NL1; // number of blocks
// The last diagonal block might be smaller
// It has this many rows:
const int small = n % NL1;
// If small > 0 we may be dealing with the last, smaller block
#ifdef NUMCORE
int numcore = NUMCORE;
#else
int numcore = omp_get_max_threads();
#endif
// first diagonal block
int cols, rows, reduction;
// It might be small if the whole matrix is small
rows = n < NL1 ? small : NL1;
pack(_diag, d, rows, rows, n);
diagonal(_diag, rows);
unpack(_diag, d, rows, rows, n);
if (n < NL1) return;
int i, j, k;
for (k=0;k<m;k++)
{
// diagonal block already in _diag
#pragma omp parallel default(none) shared(_diag, _diag2, k, d) private(i, j, rows, cols, reduction) num_threads(numcore)
#pragma omp single
{
// (k, k+1), (k+1, k) and (k+1, k+1) - new diagonal as soon as possible
if (k+1<m)
{
// FIXME
// k+1 -> always vertical
// diagonal based on vertical and transpose of vertical
// these two could be run by one thread ensuring diagonal is done quickly
//
// We might be working on the last, small row
rows = ((k+1 < m-1) || (small == 0)) ? NL1 : small;
#pragma omp task depend(inout:d[((k+1)*n+k)*NL1])
{
// vertical in _A, diagonal already in _diag
pack(_A, &d[((k+1)*n+k)*NL1], rows, NL1, n);
vertical(_diag, _A, rows, NL1);
unpack(_A, &d[((k+1)*n+k)*NL1], rows, NL1, n);
}
// now update (k+1, k+1) and run the next diagonal element
#pragma omp task depend(in:d[((k+1)*n+k)*NL1])
{
pack_A(rows, NL1, &d[((k+1)*n+k)*NL1], n, 1, _A);
pack_B(NL1, rows, &d[((k+1)*n+k)*NL1], 1, n, _B); // transposed A
dgemm_macro_kernel(rows, rows, NL1, &d[((k+1)*n+k+1)*NL1], 1, n);
// We can run the next diagonal element
// Can't be placed in _diag yet
pack(_diag2, &d[((k+1)*n+k+1)*NL1], rows, rows, n);
diagonal(_diag2, rows);
unpack(_diag2, &d[((k+1)*n+k+1)*NL1], rows, rows, n);
}
}
// j < k -> horizontal
// If k==m-1, we might be updating the last smaller row
rows = ((k < m-1) || (small == 0)) ? NL1 : small;
for (j=0;j<k;j++)
{
#pragma omp task depend(inout:d[(k*n+j)*NL1])
{
// horizontal in _B, diagonal already in _diag
pack(_B, &d[(k*n+j)*NL1], rows, NL1, n);
horizontal(_diag, _B, rows, NL1);
unpack(_B, &d[(k*n+j)*NL1], rows, NL1, n);
}
}
// j > k -> vertical (k+1 already done)
for (j=k+2;j<m;j++)
{
// The last vertical might be small.
rows = ((j < m-1) || (small == 0)) ? NL1 : small;
#pragma omp task depend(inout:d[(j*n+k)*NL1])
{
// vertical in _A, diagonal already in _diag
pack(_A, &d[(j*n+k)*NL1], rows, NL1, n);
vertical(_diag, _A, rows, NL1);
unpack(_A, &d[(j*n+k)*NL1], rows, NL1, n);
}
}
// remaining blocks: C(i,j) updated from the pivot row/column blocks
for (i=0;i<m;i++)
{
if (i==k) continue;
// only lower triangle with diagonal (j<=i)
for (j=0;j<=i;j++)
{
if (j==k) continue;
if ((j==k+1) && (i==k+1)) continue;
int indexA = j<k ? (k*n+j)*NL1 : (j*n+k)*NL1;
int indexB = i<k ? (k*n+i)*NL1 : (i*n+k)*NL1;
// The last row might be small
rows = ((i < m-1) || (small == 0)) ? NL1 : small;
// The last column might be small
cols = ((j < m-1) || (small == 0)) ? NL1 : small;
// Large tile can be updated from the last small row
reduction = ((k<m-1) || (small == 0)) ? NL1 : small;
#pragma omp task depend(in:d[indexA], d[indexB])
{
// other blocks
// multiplication of transposed matrices to get column order
// AVX kernel is slightly faster this way
// also make sure to use only lower triangle (by matrix symmetry)
// 1, n -> n, 1 in pack_* switches from column to row storage (transposes)
// transposed horizontal block
if (j<k)
pack_A(NL1, reduction, &d[indexA], 1, n, _A);
else
pack_A(cols, reduction, &d[indexA], n, 1, _A);
// transposed vertical block
if (i<k)
pack_B(reduction, NL1, &d[indexB], n, 1, _B);
else
pack_B(reduction, rows, &d[indexB], 1, n, _B);
dgemm_macro_kernel(cols, rows, reduction, &d[(i*n+j)*NL1], 1, n);
}
}
}
} // single
// promote the pre-computed (k+1,k+1) block to become the next pivot
for (i=0;i<NL1*NL1;i++)
_diag[i] = _diag2[i];
}
// copy lower triangle to upper triangle
for (i=0;i<n;i++)
for (j=0;j<i;j++)
d[j*n+i] = d[i*n+j];
}
|
jacobi7_3_save.c | #define max(a,b) (((a) < (b))? (b) : (a))
#define min(a,b) (((a) < (b))? (a) : (b))
#define _TH_1 2
#include <omp.h>
#define Index3D(_nx,_ny,_i,_j,_k) ((_i)+_nx*((_j)+_ny*(_k)))
// Auto-tuned 7-point 3D Jacobi stencil sweep with time skewing (k_bk_1 walks a
// combined space/time wavefront) and 8x32x32 spatial tiling. The /*@;BEGIN...@*/
// markers are annotations left by the tiling code generator - keep them intact.
// NOTE(review): alpha, B, ldb, Anext's ldc are accepted but never read here -
// presumably kept for signature compatibility with sibling variants; confirm.
// fac assumes A0[0] != 0 - TODO confirm with callers.
void jacobi7_3(const int nx,const int ny,int nz,const double alpha,double* A0,const int timesteps,const double* B,const int ldb,double* Anext,const int ldc) {
double fac;
double* temp_ptr;
int i;int j;int k;int t;
fac = 6.0/(A0[0]*A0[0]);
double* l0;double* lnext;
int k_bk_1;
int j_bk_2;
int i_bk_3;
/*@;BEGIN(Nest2_group3=Nest)@*/for (k_bk_1=1; k_bk_1<nz-1+16*timesteps; k_bk_1+=8)
{
omp_set_num_threads(_TH_1);
#pragma omp parallel
{
/*@;BEGIN(Nest1=Nest)@*/#pragma omp for private(t,k,j,i,l0,lnext,j_bk_2,i_bk_3)
for (t=max(0,(16+(k_bk_1-(nz-1)))/16); t<min(timesteps,(15+k_bk_1)/16); t+=1)
{
/*@;BEGIN(Nest3=Nest)@*/for (j_bk_2=1; j_bk_2<-1+ny; j_bk_2+=32)
{
/*@;BEGIN(Nest4=Nest)@*/for (i_bk_3=1; i_bk_3<-1+nx; i_bk_3+=32)
{
for (k=0; k<min(8,16*t+(-k_bk_1+(-1+nz))); k+=1)
{
for (j=0; j<min(32,-j_bk_2+(-1+ny)); j+=1)
{
for (i=0; i<min(32,-i_bk_3+(-1+nx)); i+=1)
{
// ping-pong the source/destination grids by timestep parity
if (t%2==0)
{
l0 = A0;
lnext = Anext;
}
else
{
lnext = A0;
l0 = Anext;
}
lnext[Index3D(nx,ny,i_bk_3+i,j_bk_2+j,k+(k_bk_1+-16*t))] = -(l0[Index3D(nx,ny,i_bk_3+i,j_bk_2+j,k+(k_bk_1+-16*t))]*fac)+(l0[Index3D(nx,ny,-1+(i_bk_3+i),j_bk_2+j,k+(k_bk_1+-16*t))]+(l0[Index3D(nx,ny,1+(i_bk_3+i),j_bk_2+j,k+(k_bk_1+-16*t))]+(l0[Index3D(nx,ny,i_bk_3+i,-1+(j_bk_2+j),k+(k_bk_1+-16*t))]+(l0[Index3D(nx,ny,i_bk_3+i,1+(j_bk_2+j),k+(k_bk_1+-16*t))]+(l0[Index3D(nx,ny,i_bk_3+i,j_bk_2+j,1+(k+(k_bk_1+-16*t)))]+l0[Index3D(nx,ny,i_bk_3+i,j_bk_2+j,-1+(k+(k_bk_1+-16*t)))])))));
}
}
}
}
}
}
}
}
}
|
GB_unop__identity_fc64_int16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fc64_int16)
// op(A') function: GB (_unop_tran__identity_fc64_int16)
// C type: GxB_FC64_t
// A type: int16_t
// cast: GxB_FC64_t cij = GxB_CMPLX ((double) (aij), 0)
// unaryop: cij = aij
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FC64 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = op (cast (Ax [p])) for all entries; int16 values are cast to
// double-complex with a zero imaginary part. Generated code - see file header.
GrB_Info GB (_unop_apply__identity_fc64_int16)
(
GxB_FC64_t *Cx, // Cx and Ax may be aliased
const int16_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
// dense/sparse case: every one of the anz entries is present
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (int16_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int16_t aij = Ax [p] ;
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
int16_t aij = Ax [p] ;
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): the transpose loop itself lives in the shared template
// GB_unop_transpose.c, parameterized by the GB_* macros defined above.
GrB_Info GB (_unop_tran__identity_fc64_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
matsum.c | #include "matrix.h"
/** \brief Computes element-sum of a matrix
*
* \param[in] A Input matrix
* \return \f$ \textrm{sum}( \mathbf{A} ) \f$
*
*/
mtype mat_sum(MATRIX A)
{
    int i, j, m, n;
    mtype mn = 0.0;
    m = MatCol(A);
    n = MatRow(A);
    /* reduction(+:mn) fixes a data race: previously every thread accumulated
     * into the shared mn without synchronization, producing nondeterministic
     * (and generally wrong) totals. */
    #pragma omp parallel for private(j) reduction(+:mn)
    for(i=0; i<n; ++i)
    {
        for(j=0; j<m; ++j) mn += A[i][j];
    }
    return mn;
}
/** \brief Computes row-sum of a matrix
*
* \param[in] A Input matrix
* \param[in] result Matrix to store the result
* \return \f$ \mathbf{A} \mathbf{1} \f$
*
*/
MATRIX mat_sum_row(MATRIX A, MATRIX result)
{
    int r, c, ncols, nrows;
    ncols = MatCol(A);
    nrows = MatRow(A);
    /* Allocate an nrows x 1 result on demand. */
    if(result==NULL)
    {
        result = mat_creat(nrows, 1, UNDEFINED);
        if(result==NULL) mat_error(MAT_MALLOC);
    }
    /* Each row is reduced independently, so rows parallelize cleanly. */
    #pragma omp parallel for private(c)
    for(r=0; r<nrows; ++r)
    {
        mtype acc = 0.0;
        for(c=0; c<ncols; ++c) acc += A[r][c];
        result[r][0] = acc;
    }
    return result;
}
/** \brief Computes column-sum of a matrix
*
* \param[in] A Input matrix
* \param[in] result Matrix to store the result
* \return \f$ \mathbf{1}^T \mathbf{A} \f$
*
*/
MATRIX mat_sum_col(MATRIX A, MATRIX result)
{
    int i, j, m, n;
    m = MatCol(A);
    n = MatRow(A);
    if(result==NULL) if((result = mat_creat(1, m, UNDEFINED))==NULL) mat_error(MAT_MALLOC);
    /* Parallelize over columns: the previous version ran the ROW loop in
     * parallel while all threads accumulated into the same result[0][j]
     * slots - a data race. With one thread per column, each result cell is
     * written by exactly one thread and the zero-initialization can move
     * inside the loop. */
    #pragma omp parallel for private(i)
    for(j=0; j<m; ++j)
    {
        result[0][j] = 0.0;
        for(i=0; i<n; ++i) result[0][j] += A[i][j];
    }
    return result;
}
/** \brief Computes element-sum of an integer vector
*
* \param[in] A Input integer vector
* \return \f$ \textrm{sum}( A ) \f$
*
*/
int int_vec_sum(INT_VECTOR A)
{
    /* Serial accumulation over the vector's declared length. */
    int total = 0;
    int idx;
    const int len = Int_VecLen(A);
    for(idx = 0; idx < len; ++idx)
        total += A[idx];
    return total;
}
|
nth_fibonacci_mat.c | #include <stdio.h>
#include <stdlib.h>
#include "nth_fibonacci_mat.h"
#include "threads.h"
/* Initialize all four mpz entries of m (to 0); must be paired with clear_mat. */
static void init_mat(mat2 *m) {
mpz_inits(m->a, m->b, m->c, m->d, NULL);
}
/* Load the 2x2 identity matrix into m; entries must already be initialized. */
static void identity_mat(mat2 *m) {
mpz_set_si(m->a, 1);
mpz_set_si(m->b, 0);
mpz_set_si(m->c, 0);
mpz_set_si(m->d, 1);
}
/* Entry-wise copy of one 2x2 matrix into another: to = from. */
static void copy_mat(mat2 *to, mat2 *from) {
    mpz_set(to->d, from->d);
    mpz_set(to->c, from->c);
    mpz_set(to->b, from->b);
    mpz_set(to->a, from->a);
}
/* Map a (row y, column x) pair onto the matching struct field of the
 * 2x2 matrix. Aborts the process on an out-of-range index. */
static mpz_t *get_mat(mat2 *m, int y, int x) {
    const int idx = y * 2 + x;
    if (idx == 0)
        return &m->a;
    if (idx == 1)
        return &m->b;
    if (idx == 2)
        return &m->c;
    if (idx == 3)
        return &m->d;
    fputs("err: no such index", stderr);
    exit(1);
}
/* Dump all four entries with their (row, col) coordinates.
 * The loop indices are plain ints so they match the %d conversions in the
 * format string; the original used size_t arguments with %d, which is
 * undefined behavior in a varargs call (type mismatch in gmp_printf). */
static void print_mat(mat2 *m) {
    for(int y = 0; y < 2; ++y)
        for(int x = 0; x < 2; ++x)
            gmp_printf("(%d, %d) : %Zd\n", y, x, *get_mat(m, y, x));
}
/* Release the GMP storage behind all four entries. */
static void clear_mat(mat2 *m) {
    mpz_clears(m->d, m->c, m->b, m->a, NULL);
}
/* r = m * p via Strassen's seven-product scheme for 2x2 matrices.
 * r may alias m or p: the output fields are written only after every
 * product p1..p7 has been computed from the inputs. */
static void mul_mat(mat2 *r, mat2 *m, mat2 *p) {
    mpz_t t1, t2, t3;
    mpz_t p1, p2, p3, p4, p5, p6, p7;
    mpz_inits(t1, t2, t3, NULL);
    mpz_inits(p1, p2, p3, p4, p5, p6, p7, NULL);
    // The seven Strassen products are independent, so they may run as
    // parallel sections. (The original bare "#pragma omp parallel" made
    // EVERY thread execute ALL seven blocks, racing on the shared
    // temporaries t1..t3 and p1..p7.)
    #pragma omp parallel sections
    {
        #pragma omp section
        { // p1 = (m.a + m.d) * (p.a + p.d)
            mpz_add(t1, p->a, p->d);
            mpz_add(p1, m->a, m->d);
            mpz_mul(p1, p1, t1);
        }
        #pragma omp section
        { // p2 = (m.c + m.d) * p.a
            mpz_add(p2, m->c, m->d);
            mpz_mul(p2, p2, p->a);
        }
        #pragma omp section
        { // p3 = m.a * (p.b - p.d)
            mpz_sub(p3, p->b, p->d);
            // Fixed: multiply by m->a (the left factor's top-left entry),
            // not p->a. The original computed (p.b - p.d) * p.a, which is
            // not Strassen's M3 and yields wrong products whenever
            // m->a != p->a (e.g. calc_nth_fib_mat(12) returned 338 instead
            // of the correct 233).
            mpz_mul(p3, p3, m->a);
        }
        #pragma omp section
        { // p4 = m.d * (p.c - p.a)
            mpz_sub(p4, p->c, p->a);
            mpz_mul(p4, p4, m->d);
        }
        #pragma omp section
        { // p5 = (m.a + m.b) * p.d
            mpz_add(p5, m->a, m->b);
            mpz_mul(p5, p5, p->d);
        }
        #pragma omp section
        { // p6 = (m.c - m.a) * (p.a + p.b)
            mpz_add(t2, p->a, p->b);
            mpz_sub(p6, m->c, m->a);
            mpz_mul(p6, p6, t2);
        }
        #pragma omp section
        { // p7 = (m.b - m.d) * (p.c + p.d)
            mpz_add(t3, p->c, p->d);
            mpz_sub(p7, m->b, m->d);
            mpz_mul(p7, p7, t3);
        }
    }
    // Combine the products into the result matrix:
    // r.a = p1 + p4 - p5 + p7
    mpz_add(t1, p1, p4);
    mpz_sub(r->a, p7, p5);
    mpz_add(r->a, r->a, t1);
    // r.b = p3 + p5
    mpz_add(r->b, p3, p5);
    // r.c = p2 + p4
    mpz_add(r->c, p2, p4);
    // r.d = p1 - p2 + p3 + p6
    mpz_add(t2, p1, p3);
    mpz_sub(r->d, p6, p2);
    mpz_add(r->d, r->d, t2);
    mpz_clears(t1, t2, t3, NULL);
    mpz_clears(p1, p2, p3, p4, p5, p6, p7, NULL);
}
/* Returns m*m by value and releases *m's GMP storage (callers reassign
 * the result over the argument, e.g. p = square_mat(&p)). */
static mat2 square_mat(mat2 *m) {
    mat2 p;
    init_mat(&p); // every field of p is fully assigned below, so no copy of *m is needed
    mpz_t aa, ab, ac, bc, bd, cd, dd;
    mpz_inits(aa, ab, ac, bc, bd, cd, dd, NULL);
    // The seven pairwise products only read *m and each writes its own
    // temporary, so they may run as parallel sections. (The original bare
    // "#pragma omp parallel" made every thread redo all seven
    // multiplications, racing on the shared temporaries.)
    #pragma omp parallel sections
    {
        #pragma omp section
        mpz_mul(aa, m->a, m->a);
        #pragma omp section
        mpz_mul(ab, m->a, m->b);
        #pragma omp section
        mpz_mul(ac, m->a, m->c);
        #pragma omp section
        mpz_mul(bc, m->b, m->c);
        #pragma omp section
        mpz_mul(bd, m->b, m->d);
        #pragma omp section
        mpz_mul(cd, m->c, m->d);
        #pragma omp section
        mpz_mul(dd, m->d, m->d);
    }
    // (m^2).a = a*a + b*c, .b = a*b + b*d, .c = a*c + c*d, .d = b*c + d*d
    mpz_add(p.a, aa, bc);
    mpz_add(p.b, ab, bd);
    mpz_add(p.c, ac, cd);
    mpz_add(p.d, bc, dd);
    mpz_clears(aa, ab, ac, bc, bd, cd, dd, NULL);
    clear_mat(m);
    return p;
}
/* Raise *m to the given power by binary exponentiation.
 * On entry *m holds the base; on exit it holds base^power
 * (power == 0 leaves the identity in *m). */
static void pow_mat(mat2 *m, size_t power) {
    mat2 base;
    init_mat(&base);
    copy_mat(&base, m);
    identity_mat(m);
    size_t bit = 1;
    while (bit <= power) {
        // Multiply the accumulator by the current power of the base
        // whenever the corresponding bit of the exponent is set.
        if (power & bit)
            mul_mat(m, m, &base);
        // square_mat frees the old base and returns the squared one.
        base = square_mat(&base);
        bit <<= 1;
    }
    clear_mat(&base);
}
/* Compute a Fibonacci number by raising the Q-matrix [[1,1],[1,0]] to
 * the requested power and handing the top-left entry to the visitor. */
void calc_nth_fib_mat(size_t power, mpz_visitor visitor_func) {
    mat2 fib;
    init_mat(&fib);
    // Seed with the Fibonacci Q-matrix.
    mpz_set_si(fib.d, 0);
    mpz_set_si(fib.a, 1);
    mpz_set_si(fib.b, 1);
    mpz_set_si(fib.c, 1);
    pow_mat(&fib, power);
    visitor_func(&fib.a);
    clear_mat(&fib);
}
|
GB_unop__floor_fc64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__floor_fc64_fc64)
// op(A') function: GB (_unop_tran__floor_fc64_fc64)
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = aij
// unaryop: cij = GB_cfloor (aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_cfloor (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = aij ; \
Cx [pC] = GB_cfloor (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_FLOOR || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = GB_cfloor (Ax [p]) over all entries. Auto-generated code: any
// change belongs in the Generator/ template, not here.
GrB_Info GB (_unop_apply__floor_fc64_fc64)
(
GxB_FC64_t *Cx, // Cx and Ax may be aliased
const GxB_FC64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// signed loop index so it is valid for the OpenMP parallel-for below
int64_t p ;
if (Ab == NULL)
{
// dense case: every one of the anz entries is present
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC64_t aij = Ax [p] ;
GxB_FC64_t z = aij ;
Cx [p] = GB_cfloor (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries that are not present in the bitmap
if (!Ab [p]) continue ;
GxB_FC64_t aij = Ax [p] ;
GxB_FC64_t z = aij ;
Cx [p] = GB_cfloor (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = floor (A'): transpose, typecast, and apply. Auto-generated code: any
// change belongs in the Generator/ template, not here.
GrB_Info GB (_unop_tran__floor_fc64_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the shared transpose template expands here using the GB_* macros
// defined earlier in this file (GB_GETA, GB_CAST_OP, ...)
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
residualbased_newton_raphson_contact_strategy.h | // KRATOS ___| | | |
// \___ \ __| __| | | __| __| | | __| _` | |
// | | | | | ( | | | | ( | |
// _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS
//
// License: BSD License
// license: StructuralMechanicsApplication/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_RESIDUALBASED_NEWTON_RAPHSON_CONTACT_STRATEGY)
#define KRATOS_RESIDUALBASED_NEWTON_RAPHSON_CONTACT_STRATEGY
/* System Includes */
/* External Includes */
/* Project includes */
#include "contact_structural_mechanics_application_variables.h"
#include "includes/kratos_parameters.h"
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/variables.h"
// Strategies
#include "solving_strategies/strategies/residualbased_newton_raphson_strategy.h"
// Utilities
#include "utilities/variable_utils.h"
#include "utilities/color_utilities.h"
#include "utilities/math_utils.h"
#include "custom_utilities/process_factory_utility.h"
#include "custom_utilities/contact_utilities.h"
namespace Kratos {
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class ResidualBasedNewtonRaphsonContactStrategy
* @ingroup ContactStructuralMechanicsApplication
* @brief Contact Newton Raphson class
* @details This class is a specialization of the Newton Raphson strategy with some custom modifications for contact problems
* @author Vicente Mataix Ferrandiz
*/
template<class TSparseSpace,
class TDenseSpace, // = DenseSpace<double>,
class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace>
>
class ResidualBasedNewtonRaphsonContactStrategy :
public ResidualBasedNewtonRaphsonStrategy< TSparseSpace, TDenseSpace, TLinearSolver >
{
public:
///@name Type Definitions
///@{
/** Counted pointer of ClassName */
KRATOS_CLASS_POINTER_DEFINITION( ResidualBasedNewtonRaphsonContactStrategy );
typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> StrategyBaseType;
typedef ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
typedef ConvergenceCriteria<TSparseSpace, TDenseSpace> TConvergenceCriteriaType;
typedef typename BaseType::TBuilderAndSolverType TBuilderAndSolverType;
typedef typename BaseType::TDataType TDataType;
typedef TSparseSpace SparseSpaceType;
typedef typename BaseType::TSchemeType TSchemeType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
typedef ModelPart::NodesContainerType NodesArrayType;
typedef ModelPart::ElementsContainerType ElementsArrayType;
typedef ModelPart::ConditionsContainerType ConditionsArrayType;
typedef ProcessFactoryUtility::Pointer ProcessesListType;
typedef std::size_t IndexType;
/**
* @brief Default constructor
* @param rModelPart The model part of the problem
* @param pScheme The integration scheme
* @param pNewLinearSolver The linear solver employed
* @param pNewConvergenceCriteria The convergence criteria employed
* @param MaxIterations The maximum number of iterations
* @param CalculateReactions The flag for the reaction calculation
* @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF
* @param MoveMeshFlag The flag that allows to move the mesh
*/
ResidualBasedNewtonRaphsonContactStrategy(
ModelPart& rModelPart,
typename TSchemeType::Pointer pScheme,
typename TLinearSolver::Pointer pNewLinearSolver,
typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
IndexType MaxIterations = 30,
bool CalculateReactions = false,
bool ReformDofSetAtEachStep = false,
bool MoveMeshFlag = false,
Parameters ThisParameters = Parameters(R"({})"),
ProcessesListType pMyProcesses = nullptr,
ProcessesListType pPostProcesses = nullptr
)
: ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pNewLinearSolver, pNewConvergenceCriteria, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag),
mThisParameters(ThisParameters),
mpMyProcesses(pMyProcesses),
mpPostProcesses(pPostProcesses)
{
KRATOS_TRY;
// Cache the criteria echo level: SolveSolutionStep temporarily silences the
// criteria (SetEchoLevel(0)) and restores this value afterwards
mConvergenceCriteriaEchoLevel = pNewConvergenceCriteria->GetEchoLevel();
// Complete the user-provided settings with the strategy defaults
Parameters default_parameters = GetDefaultParameters();
mThisParameters.ValidateAndAssignDefaults(default_parameters);
KRATOS_CATCH("");
}
/**
* @brief Default constructor
* @param rModelPart The model part of the problem
* @param pScheme The integration scheme
* @param pNewLinearSolver The linear solver employed
* @param pNewConvergenceCriteria The convergence criteria employed
* @param MaxIterations The maximum number of iterations
* @param CalculateReactions The flag for the reaction calculation
* @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF
* @param MoveMeshFlag The flag that allows to move the mesh
*/
ResidualBasedNewtonRaphsonContactStrategy(
ModelPart& rModelPart,
typename TSchemeType::Pointer pScheme,
typename TLinearSolver::Pointer pNewLinearSolver,
typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver,
IndexType MaxIterations = 30,
bool CalculateReactions = false,
bool ReformDofSetAtEachStep = false,
bool MoveMeshFlag = false,
Parameters ThisParameters = Parameters(R"({})"),
ProcessesListType pMyProcesses = nullptr,
ProcessesListType pPostProcesses = nullptr
)
: ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pNewLinearSolver, pNewConvergenceCriteria, pNewBuilderAndSolver, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag ),
mThisParameters(ThisParameters),
mpMyProcesses(pMyProcesses),
mpPostProcesses(pPostProcesses)
{
KRATOS_TRY;
// Cache the criteria echo level: SolveSolutionStep temporarily silences the
// criteria (SetEchoLevel(0)) and restores this value afterwards
mConvergenceCriteriaEchoLevel = pNewConvergenceCriteria->GetEchoLevel();
// Complete the user-provided settings with the strategy defaults
Parameters default_parameters = GetDefaultParameters();
mThisParameters.ValidateAndAssignDefaults(default_parameters);
KRATOS_CATCH("");
}
/**
* Destructor.
*/
// Defaulted: all owned members clean up themselves (Rule of Zero)
~ResidualBasedNewtonRaphsonContactStrategy() override
= default;
//******************** OPERATIONS ACCESSIBLE FROM THE INPUT: ************************//
//***********************************************************************************//
/**
* @brief Operation to predict the solution ... if it is not called a trivial predictor is used in which the
* values of the solution step of interest are assumed equal to the old values
*/
void Predict() override
{
KRATOS_TRY
// Auxiliar zero array
const array_1d<double, 3> zero_array = ZeroVector(3);
// Set to zero the weighted gap
ModelPart& r_model_part = StrategyBaseType::GetModelPart();
NodesArrayType& nodes_array = r_model_part.GetSubModelPart("Contact").Nodes();
const bool frictional = r_model_part.Is(SLIP);
// We predict contact pressure in case of contact problem
// (the WEIGHTED_GAP nodal variable is only present in contact problems)
if (nodes_array.begin()->SolutionStepsDataHas(WEIGHTED_GAP)) {
VariableUtils().SetScalarVar<Variable<double>>(WEIGHTED_GAP, 0.0, nodes_array);
if (frictional) {
// Frictional problems additionally track the weighted slip vector
VariableUtils().SetVectorVar(WEIGHTED_SLIP, zero_array, nodes_array);
}
// Compute the current gap
ContactUtilities::ComputeExplicitContributionConditions(r_model_part.GetSubModelPart("ComputingContact"));
// We predict a contact pressure
ProcessInfo& r_process_info = r_model_part.GetProcessInfo();
const std::size_t step = r_process_info[STEP];
if (step == 1) {
// First step: no previous displacement exists, so advance the
// coordinates by the full current displacement
#pragma omp parallel for
for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) {
auto it_node = nodes_array.begin() + i;
noalias(it_node->Coordinates()) += it_node->FastGetSolutionStepValue(DISPLACEMENT);
}
} else {
// Later steps: advance by the displacement increment between the
// current and the previous solution step
#pragma omp parallel for
for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) {
auto it_node = nodes_array.begin() + i;
noalias(it_node->Coordinates()) += (it_node->FastGetSolutionStepValue(DISPLACEMENT) - it_node->FastGetSolutionStepValue(DISPLACEMENT, 1));
}
}
}
// BaseType::Predict(); // NOTE: May cause problems in dynamics!!!
//
// // Set to zero the weighted gap // NOTE: This can be done during the search if the predict is deactivated
// ModelPart& r_model_part = StrategyBaseType::GetModelPart();
// NodesArrayType& nodes_array = r_model_part.GetSubModelPart("Contact").Nodes();
//
// // We predict contact pressure in case of contact problem
// if (nodes_array.begin()->SolutionStepsDataHas(WEIGHTED_GAP)) {
// VariableUtils().SetScalarVar<Variable<double>>(WEIGHTED_GAP, 0.0, nodes_array);
//
// // Compute the current gap
// ContactUtilities::ComputeExplicitContributionConditions(r_model_part.GetSubModelPart("ComputingContact"));
//
// // We predict a contact pressure
// ProcessInfo& r_process_info = r_model_part.GetProcessInfo();
// const double initial_penalty_parameter = r_process_info[INITIAL_PENALTY];
//
// // We iterate over the nodes
// bool is_components = nodes_array.begin()->SolutionStepsDataHas(LAGRANGE_MULTIPLIER_CONTACT_PRESSURE) ? false : true;
//
// #pragma omp parallel for
// for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) {
// auto it_node = nodes_array.begin() + i;
//
// const double current_gap = it_node->FastGetSolutionStepValue(WEIGHTED_GAP);
//
// const double penalty = it_node->Has(INITIAL_PENALTY) ? it_node->GetValue(INITIAL_PENALTY) : initial_penalty_parameter;
//
// if (current_gap < 0.0) {
// it_node->Set(ACTIVE, true);
// if (is_components) {
// it_node->FastGetSolutionStepValue(LAGRANGE_MULTIPLIER_CONTACT_PRESSURE) = penalty * current_gap;
// } else {
// const array_1d<double, 3>& normal = it_node->FastGetSolutionStepValue(NORMAL);
// it_node->FastGetSolutionStepValue(VECTOR_LAGRANGE_MULTIPLIER) = penalty * current_gap * normal;
// }
// }
// }
// }
KRATOS_CATCH("")
}
/**
* @brief Initialization of member variables and prior operations
*/
void Initialize() override
{
    KRATOS_TRY;
    // Let the base Newton-Raphson strategy set up its internals first
    BaseType::Initialize();
    // A freshly initialized strategy has not finalized any step yet
    mFinalizeWasPerformed = false;
    // Start the non-linear iteration counter at one
    StrategyBaseType::GetModelPart().GetProcessInfo()[NL_ITERATION_NUMBER] = 1;
    KRATOS_CATCH("");
}
/**
* @brief The problem of interest is solved.
* @details This function calls sequentially: Initialize(), InitializeSolutionStep(), Predict(),
* SolveSolutionStep() and FinalizeSolutionStep().
* All those functions can otherwise be called separately.
*/
double Solve() override
{
// Fixed call sequence of the single-step solve; each phase can also be
// driven individually by the caller
this->Initialize();
this->InitializeSolutionStep();
this->Predict();
this->SolveSolutionStep();
this->FinalizeSolutionStep();
// TODO: Add something if necessary
// NOTE: the return value is currently always 0.0 (no residual measure is reported)
return 0.0;
}
/**
* @brief Performs all the required operations that should be done (for each step)
* before solving the solution step.
* @details A member variable should be used as a flag to make sure this function is called only once per step.
*/
void InitializeSolutionStep() override
{
BaseType::InitializeSolutionStep();
// Re-arm the guard so FinalizeSolutionStep runs once for this new step
mFinalizeWasPerformed = false;
}
/**
* @brief Performs all the required operations that should be done (for each step)
* after solving the solution step.
*/
void FinalizeSolutionStep() override
{
    KRATOS_TRY;
    // Guard clause: the step may already have been finalized (e.g. by the
    // adaptive splitting loop); never finalize twice
    if (mFinalizeWasPerformed)
        return;
    BaseType::FinalizeSolutionStep();
    mFinalizeWasPerformed = true;
    KRATOS_CATCH("");
}
/**
* @brief Solves the current step.
* @details This function returns true if a solution has been found, false otherwise.
*/
bool SolveSolutionStep() override
{
KRATOS_TRY;
// bool is_converged = BaseType::SolveSolutionStep(); // FIXME: Requires to separate the non linear iterations
// bool is_converged = BaseSolveSolutionStep(); // Direct solution
bool is_converged = false;
// Getting model part
ModelPart& r_model_part = StrategyBaseType::GetModelPart();
if (r_model_part.IsNot(INTERACTION)) {
// Simplified semi-smooth mode: repeat the full Newton loop up to
// "inner_loop_iterations" times, re-checking convergence in between
// We get the system
TSystemMatrixType& A = *BaseType::mpA;
TSystemVectorType& Dx = *BaseType::mpDx;
TSystemVectorType& b = *BaseType::mpb;
// We get the process info
ProcessInfo& r_process_info = r_model_part.GetProcessInfo();
int inner_iteration = 0;
while (!is_converged && inner_iteration < mThisParameters["inner_loop_iterations"].GetInt()) {
++inner_iteration;
if (mConvergenceCriteriaEchoLevel > 0 && StrategyBaseType::GetModelPart().GetCommunicator().MyPID() == 0 ) {
std::cout << std::endl << BOLDFONT("Simplified semi-smooth strategy. INNER ITERATION: ") << inner_iteration;;
}
// We solve one loop
r_process_info[NL_ITERATION_NUMBER] = 1;
r_process_info[INNER_LOOP_ITERATION] = inner_iteration;
is_converged = BaseSolveSolutionStep();
// We check the convergence (silencing the criteria output, then
// restoring the cached echo level)
BaseType::mpConvergenceCriteria->SetEchoLevel(0);
is_converged = BaseType::mpConvergenceCriteria->PostCriteria(r_model_part, BaseType::GetBuilderAndSolver()->GetDofSet(), A, Dx, b);
BaseType::mpConvergenceCriteria->SetEchoLevel(mConvergenceCriteriaEchoLevel);
if (mConvergenceCriteriaEchoLevel > 0 && StrategyBaseType::GetModelPart().GetCommunicator().MyPID() == 0 ) {
if (is_converged) std::cout << BOLDFONT("Simplified semi-smooth strategy. INNER ITERATION: ") << BOLDFONT(FGRN("CONVERGED")) << std::endl;
else std::cout << BOLDFONT("Simplified semi-smooth strategy. INNER ITERATION: ") << BOLDFONT(FRED("NOT CONVERGED")) << std::endl;
}
}
} else {
// We compute the base loop (single pass when INTERACTION is flagged)
r_model_part.GetProcessInfo()[INNER_LOOP_ITERATION] = 1;
is_converged = BaseSolveSolutionStep();
}
// Optionally retry with time-step splitting when the step failed
if (mThisParameters["adaptative_strategy"].GetBool()) {
if (!is_converged) {
is_converged = AdaptativeStep();
}
}
return is_converged;
KRATOS_CATCH("");
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
///@}
///@name Friends
///@{
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
Parameters mThisParameters; /// The configuration parameters
// ADAPTATIVE STRATEGY PARAMETERS
bool mFinalizeWasPerformed; /// If the FinalizeSolutionStep has already been performed
ProcessesListType mpMyProcesses; /// The processes list
ProcessesListType mpPostProcesses; /// The post processes list
// OTHER PARAMETERS
int mConvergenceCriteriaEchoLevel; /// The echo level of the convergence criteria
///@}
///@name Protected Operators
///@{
/**
* @brief Solves the current step.
* @details This function returns true if a solution has been found, false otherwise.
*/
bool BaseSolveSolutionStep()
{
KRATOS_TRY;
// Pointers needed in the solution
ModelPart& r_model_part = StrategyBaseType::GetModelPart();
ProcessInfo& r_process_info = r_model_part.GetProcessInfo();
typename TSchemeType::Pointer pScheme = BaseType::GetScheme();
typename TBuilderAndSolverType::Pointer pBuilderAndSolver = BaseType::GetBuilderAndSolver();
TSystemMatrixType& A = *BaseType::mpA;
TSystemVectorType& Dx = *BaseType::mpDx;
TSystemVectorType& b = *BaseType::mpb;
//initializing the parameters of the Newton-Raphson cycle
IndexType iteration_number = 1;
r_process_info[NL_ITERATION_NUMBER] = iteration_number;
bool is_converged = false;
bool residual_is_updated = false;
pScheme->InitializeNonLinIteration(r_model_part, A, Dx, b);
is_converged = BaseType::mpConvergenceCriteria->PreCriteria(r_model_part, pBuilderAndSolver->GetDofSet(), A, Dx, b);
// We do a geometry check before solving the system for the first time
if (mThisParameters["adaptative_strategy"].GetBool()) {
if (CheckGeometryInverted()) {
KRATOS_WARNING("Element inverted") << "INVERTED ELEMENT BEFORE FIRST SOLVE" << std::endl;
r_process_info[STEP] -= 1; // We revert one step in the case that the geometry is already broken before start the computing
return false;
}
}
// Function to perform the building and the solving phase.
// Full build-and-solve only when the stiffness matrix must be (re)built;
// otherwise only the RHS is rebuilt and the system re-solved
if (StrategyBaseType::mRebuildLevel > 1 || StrategyBaseType::mStiffnessMatrixIsBuilt == false) {
TSparseSpace::SetToZero(A);
TSparseSpace::SetToZero(Dx);
TSparseSpace::SetToZero(b);
pBuilderAndSolver->BuildAndSolve(pScheme, r_model_part, A, Dx, b);
} else {
TSparseSpace::SetToZero(Dx); //Dx=0.00;
TSparseSpace::SetToZero(b);
pBuilderAndSolver->BuildRHSAndSolve(pScheme, r_model_part, A, Dx, b);
}
// Debugging info
BaseType::EchoInfo(iteration_number);
// Updating the results stored in the database
UpdateDatabase(A, Dx, b, StrategyBaseType::MoveMeshFlag());
// We now check the geometry
if (mThisParameters["adaptative_strategy"].GetBool()) {
if (CheckGeometryInverted()) {
KRATOS_WARNING("Element inverted") << "INVERTED ELEMENT DURING DATABASE UPDATE" << std::endl;
r_process_info[STEP] -= 1; // We revert one step in the case that the geometry is already broken before start the computing
return false;
}
}
pScheme->FinalizeNonLinIteration(r_model_part, A, Dx, b);
if (is_converged) {
//initialisation of the convergence criteria
BaseType::mpConvergenceCriteria->InitializeSolutionStep(r_model_part, pBuilderAndSolver->GetDofSet(), A, Dx, b);
if (BaseType::mpConvergenceCriteria->GetActualizeRHSflag()) {
TSparseSpace::SetToZero(b);
pBuilderAndSolver->BuildRHS(pScheme, r_model_part, b);
}
is_converged = BaseType::mpConvergenceCriteria->PostCriteria(r_model_part, pBuilderAndSolver->GetDofSet(), A, Dx, b);
}
// Iteration Cycle... performed only for NonLinearProblems
while (is_converged == false && iteration_number++<BaseType::mMaxIterationNumber) {
//setting the number of iteration
r_process_info[NL_ITERATION_NUMBER] = iteration_number;
pScheme->InitializeNonLinIteration(r_model_part, A, Dx, b);
is_converged = BaseType::mpConvergenceCriteria->PreCriteria(r_model_part, pBuilderAndSolver->GetDofSet(), A, Dx, b);
//call the linear system solver to find the correction mDx for the
//it is not called if there is no system to solve
if (SparseSpaceType::Size(Dx) != 0) {
if (StrategyBaseType::mRebuildLevel > 1 || StrategyBaseType::mStiffnessMatrixIsBuilt == false ) {
if( BaseType::GetKeepSystemConstantDuringIterations() == false) {
//A = 0.00;
TSparseSpace::SetToZero(A);
TSparseSpace::SetToZero(Dx);
TSparseSpace::SetToZero(b);
pBuilderAndSolver->BuildAndSolve(pScheme, r_model_part, A, Dx, b);
}
else {
TSparseSpace::SetToZero(Dx);
TSparseSpace::SetToZero(b);
pBuilderAndSolver->BuildRHSAndSolve(pScheme, r_model_part, A, Dx, b);
}
}
else {
TSparseSpace::SetToZero(Dx);
TSparseSpace::SetToZero(b);
pBuilderAndSolver->BuildRHSAndSolve(pScheme, r_model_part, A, Dx, b);
}
} else {
KRATOS_WARNING("No DoFs") << "ATTENTION: no free DOFs!! " << std::endl;
}
// Debugging info
BaseType::EchoInfo(iteration_number);
// Updating the results stored in the database
UpdateDatabase(A, Dx, b, StrategyBaseType::MoveMeshFlag());
// We now check the geometry
if (mThisParameters["adaptative_strategy"].GetBool()) {
if (CheckGeometryInverted()) {
KRATOS_WARNING("Element inverted") << "INVERTED ELEMENT DURING DATABASE UPDATE" << std::endl;
r_process_info[STEP] -= 1; // We revert one step in the case that the geometry is already broken before start the computing
return false;
}
}
pScheme->FinalizeNonLinIteration(r_model_part, A, Dx, b);
residual_is_updated = false;
if (is_converged) {
if (BaseType::mpConvergenceCriteria->GetActualizeRHSflag()) {
TSparseSpace::SetToZero(b);
pBuilderAndSolver->BuildRHS(pScheme, r_model_part, b);
residual_is_updated = true;
//std::cout << "mb is calculated" << std::endl;
}
is_converged = BaseType::mpConvergenceCriteria->PostCriteria(r_model_part, pBuilderAndSolver->GetDofSet(), A, Dx, b);
}
}
// Plots a warning if the maximum number of iterations is exceeded
if (iteration_number >= BaseType::mMaxIterationNumber && r_model_part.GetCommunicator().MyPID() == 0)
MaxIterationsExceeded();
// Recalculate residual if needed
// (note that some convergence criteria need it to be recalculated)
if (residual_is_updated == false) {
// NOTE:
// The following part will be commented because it is time consuming
// and there is no obvious reason to be here. If someone need this
// part please notify the community via mailing list before uncommenting it.
// Pooyan.
// TSparseSpace::SetToZero(mb);
// pBuilderAndSolver->BuildRHS(pScheme, r_model_part, mb);
}
// Calculate reactions if required
if (BaseType::mCalculateReactionsFlag)
pBuilderAndSolver->CalculateReactions(pScheme, r_model_part, A, Dx, b);
return is_converged;
KRATOS_CATCH("");
}
/**
* @brief This method performs the adaptative step
*/
bool AdaptativeStep()
{
KRATOS_TRY;
bool is_converged = false;
// Warn when no process lists were provided: without them BCs/loads cannot
// be recomputed for the split time steps, and no output will be written
if (mpMyProcesses == nullptr && StrategyBaseType::mEchoLevel > 0)
KRATOS_WARNING("No python processes") << "If you have not implemented any method to recalculate BC or loads in function of time, this strategy will be USELESS" << std::endl;
if (mpPostProcesses == nullptr && StrategyBaseType::mEchoLevel > 0)
KRATOS_WARNING("No python post processes") << "If you don't add the postprocesses and the time step if splitted you won't postprocess that steps" << std::endl;
ModelPart& r_model_part = StrategyBaseType::GetModelPart();
ProcessInfo& r_process_info = r_model_part.GetProcessInfo();
const double original_delta_time = r_process_info[DELTA_TIME]; // We save the delta time to restore later
int split_number = 0;
// We iterate until we reach the convergence or we split more than desired
while (is_converged == false && split_number <= mThisParameters["max_number_splits"].GetInt()) {
// Splitting the time step as a way to try to improve the convergence
split_number += 1;
double aux_delta_time, current_time;
const double aux_time = SplitTimeStep(aux_delta_time, current_time);
current_time += aux_delta_time;
bool inside_the_split_is_converged = false;
IndexType inner_iteration = 0;
// March through the original step with the reduced DELTA_TIME
while (current_time <= aux_time) {
inner_iteration += 1;
r_process_info[STEP] += 1;
if (inner_iteration == 1) {
// First sub-step: rewind the mesh and the nodal database to the
// state at the beginning of the failed step
if (StrategyBaseType::MoveMeshFlag())
UnMoveMesh();
NodesArrayType& nodes_array = r_model_part.Nodes();
#pragma omp parallel for
for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) {
auto it_node = nodes_array.begin() + i;
it_node->OverwriteSolutionStepData(1, 0);
// it_node->OverwriteSolutionStepData(2, 1);
}
r_process_info.SetCurrentTime(current_time); // Reduces the time step
FinalizeSolutionStep();
} else {
// Subsequent sub-steps: advance the nodal database and process info
NodesArrayType& nodes_array = r_model_part.Nodes();
#pragma omp parallel for
for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i)
(nodes_array.begin() + i)->CloneSolutionStepData();
r_process_info.CloneSolutionStepInfo();
r_process_info.ClearHistory(r_model_part.GetBufferSize());
r_process_info.SetAsTimeStepInfo(current_time); // Sets the new time step
}
// We execute the processes before the non-linear iteration
if (mpMyProcesses != nullptr)
mpMyProcesses->ExecuteInitializeSolutionStep();
if (mpPostProcesses != nullptr)
mpPostProcesses->ExecuteInitializeSolutionStep();
// In order to initialize again everything
BaseType::mInitializeWasPerformed = false;
mFinalizeWasPerformed = false;
// We repeat the solve with the new DELTA_TIME
this->Initialize();
this->InitializeSolutionStep();
this->Predict();
inside_the_split_is_converged = BaseType::SolveSolutionStep();
this->FinalizeSolutionStep();
// We execute the processes after the non-linear iteration
if (mpMyProcesses != nullptr)
mpMyProcesses->ExecuteFinalizeSolutionStep();
if (mpPostProcesses != nullptr)
mpPostProcesses->ExecuteFinalizeSolutionStep();
if (mpMyProcesses != nullptr)
mpMyProcesses->ExecuteBeforeOutputStep();
if (mpPostProcesses != nullptr)
mpPostProcesses->PrintOutput();
if (mpMyProcesses != nullptr)
mpMyProcesses->ExecuteAfterOutputStep();
current_time += aux_delta_time;
}
// NOTE: convergence reflects the last sub-step of this split level
if (inside_the_split_is_converged)
is_converged = true;
}
// Plots a warning if the maximum number of iterations and splits are exceeded
if (is_converged == false)
MaxIterationsAndSplitsExceeded();
// Restoring original DELTA_TIME
r_process_info[DELTA_TIME] = original_delta_time;
return is_converged;
KRATOS_CATCH("");
}
/**
* @brief Here the database is updated
* @param A The LHS matrix
* @param Dx The increment of solution after solving system
* @param b The RHS vector
* @param MoveMesh The flag that tells if the mesh should be moved
*/
void UpdateDatabase(
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b,
const bool MoveMesh
) override
{
// Currently a plain pass-through to the base strategy; kept as an
// override hook for contact-specific post-update work
BaseType::UpdateDatabase(A,Dx,b,MoveMesh);
// TODO: Add something if necessary
}
/**
* @brief This method checks that no element is inverted
*/
bool CheckGeometryInverted()
{
ModelPart& r_model_part = StrategyBaseType::GetModelPart();
ProcessInfo& r_process_info = r_model_part.GetProcessInfo();
bool inverted_element = false;
ElementsArrayType& elements_array = r_model_part.Elements();
// NOT OMP
for(int i = 0; i < static_cast<int>(elements_array.size()); ++i) {
auto it_elem = elements_array.begin() + i;
auto& geom = it_elem->GetGeometry();
if (geom.DeterminantOfJacobian(0) < 0.0) {
if (mConvergenceCriteriaEchoLevel > 0) {
KRATOS_WATCH(it_elem->Id())
KRATOS_WATCH(geom.DeterminantOfJacobian(0))
}
return true;
}
// We check now the deformation gradient
std::vector<Matrix> deformation_gradient_matrices;
it_elem->GetValueOnIntegrationPoints( DEFORMATION_GRADIENT, deformation_gradient_matrices, r_process_info);
for (IndexType i_gp = 0; i_gp < deformation_gradient_matrices.size(); ++i_gp) {
const double det_f = MathUtils<double>::DetMat(deformation_gradient_matrices[i_gp]);
if (det_f < 0.0) {
if (mConvergenceCriteriaEchoLevel > 0) {
KRATOS_WATCH(it_elem->Id())
KRATOS_WATCH(det_f)
}
return true;
}
}
}
return inverted_element;
}
/**
* @brief Here the time step is splitted
* @param AuxDeltaTime The new delta time to be considered
* @param CurrentTime The current time
* @return The destination time
*/
double SplitTimeStep(
double& AuxDeltaTime,
double& CurrentTime
)
{
KRATOS_TRY;
const double aux_time = StrategyBaseType::GetModelPart().GetProcessInfo()[TIME];
AuxDeltaTime = StrategyBaseType::GetModelPart().GetProcessInfo()[DELTA_TIME];
CurrentTime = aux_time - AuxDeltaTime;
StrategyBaseType::GetModelPart().GetProcessInfo()[TIME] = CurrentTime; // Restore time to the previous one
AuxDeltaTime /= mThisParameters["split_factor"].GetDouble();
StrategyBaseType::GetModelPart().GetProcessInfo()[DELTA_TIME] = AuxDeltaTime; // Change delta time
CoutSplittingTime(AuxDeltaTime, aux_time);
return aux_time;
KRATOS_CATCH("");
}
/**
* This method moves the mesh back to the previous position
*/
void UnMoveMesh()
{
KRATOS_TRY;
if (StrategyBaseType::GetModelPart().NodesBegin()->SolutionStepsDataHas(DISPLACEMENT_X) == false)
KRATOS_ERROR << "It is impossible to move the mesh since the DISPLACEMENT var is not in the model_part. Either use SetMoveMeshFlag(False) or add DISPLACEMENT to the list of variables" << std::endl;
NodesArrayType& nodes_array = StrategyBaseType::GetModelPart().Nodes();
#pragma omp parallel for
for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) {
auto it_node = nodes_array.begin() + i;
noalias(it_node->Coordinates()) = it_node->GetInitialPosition().Coordinates();
noalias(it_node->Coordinates()) += it_node->FastGetSolutionStepValue(DISPLACEMENT, 1);
}
KRATOS_CATCH("");
}
/**
* @brief This method returns the default parameters in order to avoid code duplication
* @return Returns the default parameters
*/
Parameters GetDefaultParameters()
{
    // Return the settings directly; no need for a named temporary
    return Parameters(R"(
{
    "adaptative_strategy" : false,
    "split_factor" : 10.0,
    "max_number_splits" : 3,
    "inner_loop_iterations" : 5
})" );
}
/**
* @brief This method prints information after solving the problem
*/
void CoutSolvingProblem()
{
if (mConvergenceCriteriaEchoLevel != 0) {
std::cout << "STEP: " << StrategyBaseType::GetModelPart().GetProcessInfo()[STEP] << "\t NON LINEAR ITERATION: " << StrategyBaseType::GetModelPart().GetProcessInfo()[NL_ITERATION_NUMBER] << "\t TIME: " << StrategyBaseType::GetModelPart().GetProcessInfo()[TIME] << "\t DELTA TIME: " << StrategyBaseType::GetModelPart().GetProcessInfo()[DELTA_TIME] << std::endl;
}
}
/**
* @brief This method prints information after split the increment of time
* @param AuxDeltaTime The new time step to be considered
* @param AuxTime The destination time
*/
void CoutSplittingTime(
    const double AuxDeltaTime,
    const double AuxTime
    )
{
    // Only the root rank prints, and only when echo is enabled
    if (mConvergenceCriteriaEchoLevel > 0 && StrategyBaseType::GetModelPart().GetCommunicator().MyPID() == 0 ) {
        const double current_time = StrategyBaseType::GetModelPart().GetProcessInfo()[TIME];
        std::cout.precision(4);
        std::cout << "|----------------------------------------------------|" << std::endl;
        std::cout << "| " << BOLDFONT("SPLITTING TIME STEP") << " |" << std::endl;
        std::cout << "| " << BOLDFONT("COMING BACK TO TIME: ") << std::scientific << current_time << " |" << std::endl;
        std::cout << "| " << BOLDFONT(" NEW TIME STEP: ") << std::scientific << AuxDeltaTime << " |" << std::endl;
        std::cout << "| " << BOLDFONT(" UNTIL TIME: ") << std::scientific << AuxTime << " |" << std::endl;
        std::cout << "|----------------------------------------------------|" << std::endl;
    }
}
/**
* @brief This method prints information after reaching the maximum number of iterations
*/
void MaxIterationsExceeded() override
{
    // Print the warning banner only on the root rank and when echo is enabled
    const bool print_banner = mConvergenceCriteriaEchoLevel > 0 && StrategyBaseType::GetModelPart().GetCommunicator().MyPID() == 0;
    if (print_banner) {
        std::cout << "|----------------------------------------------------|" << std::endl;
        std::cout << "| " << BOLDFONT(FRED("ATTENTION: Max iterations exceeded")) << " |" << std::endl;
        std::cout << "|----------------------------------------------------|" << std::endl;
    }
}
/**
* @brief This method prints information after reaching the maximum number of iterations and splits
*/
void MaxIterationsAndSplitsExceeded()
{
    // Print the warning banner only on the root rank and when echo is enabled
    const bool print_banner = mConvergenceCriteriaEchoLevel > 0 && StrategyBaseType::GetModelPart().GetCommunicator().MyPID() == 0;
    if (print_banner) {
        std::cout << "|----------------------------------------------------|" << std::endl;
        std::cout << "| " << BOLDFONT(FRED("ATTENTION: Max iterations exceeded")) << " |" << std::endl;
        std::cout << "| " << BOLDFONT(FRED(" Max number of splits exceeded ")) << " |" << std::endl;
        std::cout << "|----------------------------------------------------|" << std::endl;
    }
}
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@{
/**
* Copy constructor.
*/
ResidualBasedNewtonRaphsonContactStrategy(const ResidualBasedNewtonRaphsonContactStrategy& Other)
{
// NOTE(review): body intentionally empty — no member is copied from Other and
// no base initializer is provided; confirm this non-copying behavior is intended.
};
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@}
///@name Serialization
///@{
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; /* Class ResidualBasedNewtonRaphsonContactStrategy */
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
///@}
} // namespace Kratos
#endif /* KRATOS_RESIDUALBASED_NEWTON_RAPHSON_CONTACT_STRATEGY */
|
rnn_impl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file rnn_impl.h
* \brief
* \author Shu Zhang
*/
#ifndef MXNET_OPERATOR_RNN_IMPL_H_
#define MXNET_OPERATOR_RNN_IMPL_H_
#include <dmlc/logging.h>
#include <dmlc/parameter.h>
#include <mxnet/operator.h>
#include <algorithm>
#include <map>
#include <vector>
#include <string>
#include <utility>
#include "./math.h"
#include "./math_functions-inl.h"
#include "./operator_common.h"
#include "./mshadow_op.h"
#include "./linalg.h"
// Logistic sigmoid: maps any real input into the open interval (0, 1).
template<typename DType>
inline DType sigmoid(DType x) {
  const DType one = static_cast<DType>(1);
  return one / (one + std::exp(-x));
}
// Forward pass (training mode) of one direction of a single LSTM layer.
// ws: scratch workspace holding the gate pre-activations; rs: reserve space
// that keeps cell states (c) and gate activations (ifgo) for the backward
// pass. bid = true runs the reverse direction of a bidirectional layer: time
// steps are visited in reverse and outputs go to columns [H, 2H) of y.
// T/N/I/H: sequence length, batch size, input size, hidden state size.
// If state_outputs is set, the final step's h/c are written to hy_ptr/cy_ptr.
template<typename DType>
void LstmForwardTrainingSingleLayer(DType* ws,
                                    DType* rs,
                                    bool state_outputs,
                                    bool bid,
                                    const int T,
                                    const int N,
                                    const int I,
                                    const int H,
                                    const Tensor<cpu, 2, DType> &x,
                                    const Tensor<cpu, 2, DType> &hx,
                                    const Tensor<cpu, 2, DType> &cx,
                                    const Tensor<cpu, 3, DType> &y,
                                    DType* w_ptr,
                                    DType* b_ptr,
                                    DType* hy_ptr,
                                    DType* cy_ptr) {
  using namespace mshadow;
  // Parameter views: wx maps input->gates, wh maps hidden->gates;
  // bx and bh are the two bias sets (one per projection).
  const Tensor<cpu, 2, DType> wx(w_ptr, Shape2(H * 4, I));
  const Tensor<cpu, 2, DType> wh(w_ptr + I * H * 4, Shape2(H * 4, H));
  const Tensor<cpu, 2, DType> bx(b_ptr, Shape2(4, H));
  const Tensor<cpu, 2, DType> bh(b_ptr + H * 4, Shape2(4, H));
  // Workspace views: yx caches x*wx^T for all T steps at once,
  // yh holds h*wh^T for the current step, h is the running hidden state.
  const Tensor<cpu, 2, DType> yx_flat(ws, Shape2(T * N, 4 * H));
  const Tensor<cpu, 2, DType> yh_flat(ws + T * N * H * 4, Shape2(N, 4 * H));
  const Tensor<cpu, 4, DType> yx(yx_flat.dptr_, Shape4(T, N, 4, H));
  const Tensor<cpu, 3, DType> yh(yh_flat.dptr_, Shape3(N, 4, H));
  Tensor<cpu, 2, DType> h(yh_flat.dptr_ + N * H * 4, Shape2(N, H));
  // Reserve layout: the reverse direction's c/ifgo block starts at offset
  // T*N*H*7 (past the forward direction's c, ifgo and the shared D=2 output
  // slice); must match the layout read by LstmBackwardSingleLayer.
  DType *c_ptr = bid ? rs + T * N * H * 7 : rs;
  Tensor<cpu, 3, DType> c(c_ptr, Shape3(T, N, H));
  Tensor<cpu, 4, DType> ifgo(c_ptr + T * N * H, Shape4(T, N, H, 4));
  const int offset = bid ? H : 0;  // column offset into y for this direction
  const DType alpha = 1.0;
  const DType beta = 0.0;
  const int cell_size = N * H;
  // Input projection for every time step in a single GEMM.
  linalg_gemm(x, wx, yx_flat, alpha, beta, false, true);
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  for (int i = 0; i < T; ++i) {
    int t = bid ? T - 1 - i : i;  // actual time index (reversed when bid)
    // Hidden projection of the previous step's h (hx at the first step).
    linalg_gemm(i ? h : hx, wh, yh_flat, alpha, beta, false, true);
    #pragma omp parallel for num_threads(omp_threads)
    for (int jk = 0; jk < cell_size; ++jk) {
      int j = jk / H;  // batch index
      int k = jk % H;  // hidden unit index
      // Gate order along axis 2: input (i), forget (f), candidate (g), output (o).
      DType it = sigmoid<DType>(yx[t][j][0][k] + yh[j][0][k] + bx[0][k] + bh[0][k]);
      DType ft = sigmoid<DType>(yx[t][j][1][k] + yh[j][1][k] + bx[1][k] + bh[1][k]);
      DType gt = tanh(yx[t][j][2][k] + yh[j][2][k] + bx[2][k] + bh[2][k]);
      DType ot = sigmoid<DType>(yx[t][j][3][k] + yh[j][3][k] + bx[3][k] + bh[3][k]);
      DType ct = (i ? c[i-1][j][k] : cx[j][k]) * ft + it * gt;
      DType ht = ot * tanh(ct);
      h[j][k] = ht;
      // reserve: store everything the backward pass re-reads
      y[t][j][k + offset] = ht;
      c[i][j][k] = ct;
      ifgo[i][j][k][0] = it;
      ifgo[i][j][k][1] = ft;
      ifgo[i][j][k][2] = gt;
      ifgo[i][j][k][3] = ot;
      if (i == T - 1 && state_outputs) {
        hy_ptr[jk] = ht;
        cy_ptr[jk] = ct;
      }
    }
  }
}
// Multi-layer (optionally bidirectional, D == 2) LSTM forward pass in
// training mode. Each layer's output sequence lives inside the per-layer
// reserve space (rs) so the backward pass can reuse it; the last layer's
// output is finally copied into y_ptr.
template <typename DType>
void LstmForwardTraining(DType* ws,
                         DType* rs,
                         bool state_outputs,
                         const int L,
                         const int D,
                         const int T,
                         const int N,
                         const int I,
                         const int H,
                         DType* x_ptr,
                         DType* hx_ptr,
                         DType* cx_ptr,
                         DType* w_ptr,
                         DType* b_ptr,
                         DType* y_ptr,
                         DType* hy_ptr,
                         DType* cy_ptr) {
  const int total_layers = D * L;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(total_layers, N, H));
  Tensor<cpu, 3, DType> cx(cx_ptr, Shape3(total_layers, N, H));
  const int b_size = 2 * H * 4;          // biases per direction (bx + bh)
  const int r_size = D * T * N * H * 6;  // reserve space consumed per layer
  const int y_offset = T * N * H * 5;    // offset of the y slice inside rs
  const int cell_size = N * H;
  int idx = 0;  // state & cell state's idx;
  for (int i = 0; i < L; ++i) {
    // Upper layers consume the previous layer's D*H-wide output as input.
    const int input_size = i ? H * D : I;
    const int w_size = (input_size + H) * H * 4;
    Tensor<cpu, 2, DType> x(x_ptr, Shape2(T * N, input_size));
    Tensor<cpu, 3, DType> y(rs + y_offset, Shape3(T, N, H * D));
    // Forward direction of layer i.
    LstmForwardTrainingSingleLayer<DType>(ws, rs, state_outputs, false, T, N, input_size, H, x,
                                          hx[idx], cx[idx], y, w_ptr, b_ptr, hy_ptr, cy_ptr);
    if (D == 2) {
      // Advance to the reverse direction's weights/biases/states and run it.
      w_ptr += w_size;
      b_ptr += b_size;
      ++idx;
      if (state_outputs) {
        hy_ptr += cell_size;
        cy_ptr += cell_size;
      }
      LstmForwardTrainingSingleLayer<DType>(ws, rs, state_outputs, true, T, N, input_size, H, x,
                                            hx[idx], cx[idx], y, w_ptr, b_ptr, hy_ptr, cy_ptr);
    }
    if (i != L - 1) {
      // Move every cursor to the next layer; its input is this layer's
      // y slice that was just written into the reserve space.
      w_ptr += w_size;
      b_ptr += b_size;
      x_ptr = y.dptr_;
      rs += r_size;
      ++idx;
      if (state_outputs) {
        hy_ptr += cell_size;
        cy_ptr += cell_size;
      }
    }
  }
  // Export the last layer's output sequence to the caller's buffer.
  memcpy(y_ptr, rs + y_offset, T * N * H * D * sizeof(DType));
}
// Forward pass (inference mode) of one direction of a single LSTM layer.
// Unlike the training variant, no reserve space is kept: only the running
// h/c of the current step live in the workspace, so memory use is minimal.
// bid = true runs the reverse direction (time reversed, outputs at offset H).
template<typename DType>
void LstmForwardInferenceSingleLayer(DType* ws,
                                     bool state_outputs,
                                     bool bid,
                                     const int T,
                                     const int N,
                                     const int I,
                                     const int H,
                                     const Tensor<cpu, 2, DType> &x,
                                     const Tensor<cpu, 2, DType> &hx,
                                     const Tensor<cpu, 2, DType> &cx,
                                     const Tensor<cpu, 3, DType> &y,
                                     DType* w_ptr,
                                     DType* b_ptr,
                                     DType* hy_ptr,
                                     DType* cy_ptr) {
  using namespace mshadow;
  // Parameter views: wx (input->gates), wh (hidden->gates), bx/bh biases.
  const Tensor<cpu, 2, DType> wx(w_ptr, Shape2(H * 4, I));
  const Tensor<cpu, 2, DType> wh(w_ptr + I * H * 4, Shape2(H * 4, H));
  const Tensor<cpu, 2, DType> bx(b_ptr, Shape2(4, H));
  const Tensor<cpu, 2, DType> bh(b_ptr + H * 4, Shape2(4, H));
  // Workspace views: yx caches x*wx^T for all steps; yh, h and c are
  // per-step buffers for the gate pre-activations and the running state.
  Tensor<cpu, 2, DType> yx_flat(ws, Shape2(T * N, H * 4));
  Tensor<cpu, 2, DType> yh_flat(ws + T * N * H * 4, Shape2(N, H * 4));
  const Tensor<cpu, 4, DType> yx(yx_flat.dptr_, Shape4(T, N, 4, H));
  const Tensor<cpu, 3, DType> yh(yh_flat.dptr_, Shape3(N, 4, H));
  Tensor<cpu, 2, DType> h(yh_flat.dptr_ + N * H * 4, Shape2(N, H));
  Tensor<cpu, 2, DType> c(h.dptr_ + N * H, Shape2(N, H));
  const int offset = bid ? H : 0;  // column offset into y for this direction
  const DType alpha = 1.0;
  const DType beta = 0.0;
  const int cell_size = N * H;
  // Input projection for every time step in a single GEMM.
  linalg_gemm(x, wx, yx_flat, alpha, beta, false, true);
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  for (int i = 0; i < T; ++i) {
    int t = bid ? T - 1 - i : i;  // actual time index (reversed when bid)
    linalg_gemm(i ? h : hx, wh, yh_flat, alpha, beta, false, true);
    #pragma omp parallel for num_threads(omp_threads)
    for (int jk = 0; jk < cell_size; ++jk) {
      int j = jk / H;  // batch index
      int k = jk % H;  // hidden unit index
      // Gate order: input (i), forget (f), candidate (g), output (o).
      DType it = sigmoid<DType>(yx[t][j][0][k] + yh[j][0][k] + bx[0][k] + bh[0][k]);
      DType ft = sigmoid<DType>(yx[t][j][1][k] + yh[j][1][k] + bx[1][k] + bh[1][k]);
      DType gt = tanh(yx[t][j][2][k] + yh[j][2][k] + bx[2][k] + bh[2][k]);
      DType ot = sigmoid<DType>(yx[t][j][3][k] + yh[j][3][k] + bx[3][k] + bh[3][k]);
      DType ct = (i ? c[j][k] : cx[j][k]) * ft + it * gt;
      DType ht = ot * tanh(ct);
      y[t][j][k + offset] = ht;
      if (i == T - 1 && state_outputs) {
        // Final step: export states directly; h/c buffers are no longer needed.
        hy_ptr[jk] = ht;
        cy_ptr[jk] = ct;
      } else {
        h[j][k] = ht;
        c[j][k] = ct;
      }
    }
  }
}
// Multi-layer (optionally bidirectional) LSTM forward pass in inference mode.
// Layer outputs ping-pong between a temporary buffer inside ws and the final
// y_ptr; the parity trick with `flag` guarantees the LAST layer writes y_ptr.
template <typename DType>
void LstmForwardInference(DType* ws,
                          bool state_outputs,
                          const int L,
                          const int D,
                          const int T,
                          const int N,
                          const int I,
                          const int H,
                          DType* x_ptr,
                          DType* hx_ptr,
                          DType* cx_ptr,
                          DType* w_ptr,
                          DType* b_ptr,
                          DType* y_ptr,
                          DType* hy_ptr,
                          DType* cy_ptr) {
  const int total_layers = D * L;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(total_layers, N, H));
  Tensor<cpu, 3, DType> cx(cx_ptr, Shape3(total_layers, N, H));
  const int b_size = 2 * H * 4;  // biases per direction (bx + bh)
  const int cell_size = N * H;
  // Scratch output buffer located past the per-step workspace region.
  DType* y_tmp_ptr = ws + (T + 1) * cell_size * 4 + cell_size * 2;
  DType* y_cur_ptr = y_ptr;
  int idx = 0;  // state & cell state's idx;
  // Start on y_tmp for even L so the final layer lands on y_ptr.
  bool flag = L % 2 ? false : true;
  for (int i = 0; i < L; ++i) {
    const int input_size = i ? H * D : I;  // upper layers consume D*H features
    const int w_size = (input_size + H) * H * 4;
    // If bidirectional, need space to save current layer output y.
    if (D == 2) {
      y_cur_ptr = flag ? y_tmp_ptr : y_ptr;
      flag = !flag;
    }
    Tensor<cpu, 2, DType> x(x_ptr, Shape2(T * N, input_size));
    Tensor<cpu, 3, DType> y(y_cur_ptr, Shape3(T, N, H * D));
    LstmForwardInferenceSingleLayer<DType>(ws, state_outputs, false, T, N, input_size, H,
                                           x, hx[idx], cx[idx], y, w_ptr, b_ptr, hy_ptr, cy_ptr);
    // If bidirectional, then calculate the reverse direction's forward result.
    if (D == 2) {
      w_ptr += w_size;
      b_ptr += b_size;
      ++idx;
      if (state_outputs) {
        hy_ptr += cell_size;
        cy_ptr += cell_size;
      }
      LstmForwardInferenceSingleLayer<DType>(ws, state_outputs, true, T, N, input_size, H,
                                             x, hx[idx], cx[idx], y, w_ptr, b_ptr, hy_ptr, cy_ptr);
    }
    // Don't need to move pointer in the last layer.
    if (i != L - 1) {
      w_ptr += w_size;
      b_ptr += b_size;
      x_ptr = y_cur_ptr;  // next layer reads this layer's output
      ++idx;
      if (state_outputs) {
        hy_ptr += cell_size;
        cy_ptr += cell_size;
      }
    }
  }
}
// Backward pass of one direction of a single LSTM layer.
// Reads the cell states (c) and gate activations (ifgo) saved in the reserve
// space by LstmForwardTrainingSingleLayer, and produces gradients w.r.t. the
// input (dx), initial states (dhx/dcx), weights (dwx/dwh) and biases (dbx/dbh).
// bid = true processes the reverse direction (reads/writes columns [H, 2H)).
template <typename DType>
void LstmBackwardSingleLayer(DType* ws,
                             DType* rs,
                             bool bid,
                             const int T,
                             const int N,
                             const int I,
                             const int H,
                             const Tensor<cpu, 2, DType> &x,
                             const Tensor<cpu, 2, DType> &hx,
                             const Tensor<cpu, 2, DType> &cx,
                             const Tensor<cpu, 3, DType> &y,
                             const Tensor<cpu, 3, DType> &dy,
                             const Tensor<cpu, 2, DType> &dx,
                             const Tensor<cpu, 2, DType> &dhx,
                             const Tensor<cpu, 2, DType> &dcx,
                             DType* dhy_ptr,
                             DType* dcy_ptr,
                             DType* w_ptr,
                             DType* dw_ptr,
                             DType* db_ptr) {
  using namespace mshadow;
  const Tensor<cpu, 2, DType> wx(w_ptr, Shape2(H * 4, I));
  const Tensor<cpu, 2, DType> wh(w_ptr + I * H * 4, Shape2(H * 4, H));
  Tensor<cpu, 2, DType> dwx(dw_ptr, Shape2(H * 4, I));
  Tensor<cpu, 2, DType> dwh(dw_ptr + I * H * 4, Shape2(H * 4, H));
  Tensor<cpu, 1, DType> dbx(db_ptr, Shape1(H * 4));
  Tensor<cpu, 1, DType> dbh(dbx.dptr_ + H * 4, Shape1(H * 4));
  // Reserve layout must mirror the forward pass: reverse direction's block
  // starts at offset T*N*H*7.
  DType *c_ptr = bid ? rs + T * N * H * 7 : rs;
  const Tensor<cpu, 3, DType> c(c_ptr, Shape3(T, N, H));
  const Tensor<cpu, 4, DType> ifgo(c_ptr + T * N * H, Shape4(T, N, H, 4));
  // dwh/dbx/dbh are accumulated over time steps, so start from zero.
  memset(dwh.dptr_, 0, H * H * 4 * sizeof(DType));
  memset(dbx.dptr_, 0, H * 4 * sizeof(DType));
  memset(dbh.dptr_, 0, H * 4 * sizeof(DType));
  // Workspace: difgo holds gate-pre-activation gradients for all steps;
  // dh/dc are the running state gradients; htmp buffers h of the next step.
  Tensor<cpu, 4, DType> difgo(ws, Shape4(T, N, 4, H));
  Tensor<cpu, 2, DType> dh(ws + T * N * H * 4, Shape2(N, H));
  Tensor<cpu, 2, DType> dc(dh.dptr_ + N * H, Shape2(N, H));
  Tensor<cpu, 2, DType> htmp(dc.dptr_ + N * H, Shape2(N, H));
  const int offset = bid ? H : 0;  // column offset into y/dy for this direction
  const DType alpha = 1.0;
  const DType beta0 = 0.0;
  const DType beta1 = 1.0;
  const int cell_size = N * H;
  // Seed the state gradients with the caller-provided dhy/dcy (if any).
  if (dhy_ptr != NULL) {
    memcpy(dh.dptr_, dhy_ptr, cell_size * sizeof(DType));
  }
  if (dcy_ptr != NULL) {
    memcpy(dc.dptr_, dcy_ptr, cell_size * sizeof(DType));
  }
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  // Iterate the forward pass's steps in reverse order.
  for (int i = T - 1; i >= 0; --i) {
    int t = bid ? T - 1 - i : i;      // actual time index
    int tnext = bid ? t + 1 : t - 1;  // time index of the previous forward step
    // At the first forward step (i == 0), gradients flow into dhx/dcx and the
    // states come from hx/cx instead of the recurrent buffers.
    const Tensor<cpu, 2, DType>& dhnext = i ? dh : dhx;
    const Tensor<cpu, 2, DType>& dcnext = i ? dc : dcx;
    const Tensor<cpu, 2, DType>& hnext = i ? htmp : hx;
    const Tensor<cpu, 2, DType>& cnext = i ? c[i - 1] : cx;
    #pragma omp parallel for num_threads(omp_threads)
    for (int jk = 0; jk < cell_size; ++jk) {
      int j = jk / H;  // batch index
      int k = jk % H;  // hidden unit index
      DType tc = tanh(c[i][j][k]);
      DType it = ifgo[i][j][k][0];
      DType ft = ifgo[i][j][k][1];
      DType gt = ifgo[i][j][k][2];
      DType ot = ifgo[i][j][k][3];
      // Accumulate the output-sequence gradient into the running dh/dc,
      // then form the gate gradients (sigmoid': s*(1-s), tanh': 1-t^2).
      dh[j][k] += dy[t][j][k + offset];
      dc[j][k] += dh[j][k] * ot * (1 - tc * tc);
      difgo[t][j][0][k] = dc[j][k] * gt * it * (1 - it);
      difgo[t][j][1][k] = dc[j][k] * cnext[j][k] * ft * (1 - ft);
      difgo[t][j][2][k] = dc[j][k] * it * (1 - gt * gt);
      difgo[t][j][3][k] = dh[j][k] * tc * ot * (1 - ot);
      dcnext[j][k] = dc[j][k] * ft;
      if (i) {
        // Stage the previous step's h (read from y) for the dwh GEMM below.
        htmp[j][k] = y[tnext][j][k + offset];
      }
    }
    // Propagate gate gradients to dh of the previous step and accumulate dwh.
    Tensor<cpu, 2, DType> dyh(difgo[t].dptr_, Shape2(N, H * 4));
    linalg_gemm(dyh, wh, dhnext, alpha, beta0, false, false);
    linalg_gemm(dyh, hnext, dwh, alpha, beta1, true, false);
  }
  // Input gradient (accumulated on top of the forward direction's when bid)
  // and input-weight gradient, each in one big GEMM over all steps.
  Tensor<cpu, 2, DType> dyx(difgo.dptr_, Shape2(T * N, H * 4));
  linalg_gemm(dyx, wx, dx, alpha, bid ? beta1 : beta0, false, false);
  linalg_gemm(dyx, x, dwx, alpha, beta0, true, false);
  const int row = T * N;
  const int col = H * 4;
  // Bias gradients: both bias sets receive the same summed gate gradients.
  for (int i = 0; i < row; ++i) {
    for (int j = 0; j < col; ++j) {
      dbx[j] += dyx[i][j];
      dbh[j] = dbx[j];
    }
  }
}
// Backward pass of a multi-layer (optionally bidirectional) LSTM.
// Layers are processed from top to bottom; each layer's input gradient (dx)
// becomes the output gradient (dy) of the layer below via dy_tmp_ptr.
template <typename DType>
void LstmBackward(DType* ws,
                  DType* rs,
                  const int L,
                  const int D,
                  const int T,
                  const int N,
                  const int I,
                  const int H,
                  DType* x_ptr,
                  DType* hx_ptr,
                  DType* cx_ptr,
                  DType* w_ptr,
                  DType* y_ptr,
                  DType* dy_ptr,
                  DType* dhy_ptr,
                  DType* dcy_ptr,
                  DType* dx_ptr,
                  DType* dhx_ptr,
                  DType* dcx_ptr,
                  DType* dw_ptr,
                  DType* db_ptr) {
  const int total_layers = D * L;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(total_layers, N, H));
  Tensor<cpu, 3, DType> cx(cx_ptr, Shape3(total_layers, N, H));
  Tensor<cpu, 3, DType> dhx(dhx_ptr, Shape3(total_layers, N, H));
  Tensor<cpu, 3, DType> dcx(dcx_ptr, Shape3(total_layers, N, H));
  const int b_size = 2 * H * 4;            // biases per direction (bx + bh)
  const int r_size = D * T * N * H * 6;    // reserve space per layer
  const int y_offset = T * N * H * 5;      // offset of y inside a layer's reserve
  const int w_size1 = (I + H) * H * 4;     // first layer
  const int w_size2 = (D * H + H) * H * 4; // other layers
  const int cell_size = N * H;
  // Scratch buffer receiving each layer's dx (it becomes the next dy).
  DType* dy_tmp_ptr = ws + T * cell_size * 4 + cell_size * 3;
  for (int i = L - 1; i >= 0; --i) {
    const int input_size = i ? H * D : I;
    const int w_size = i ? w_size2 : w_size1;
    int idx = i * D;  // index of this layer's (forward-direction) state slices
    // Per-layer cursors into the packed weight/bias/reserve/state buffers.
    DType* w_cur_ptr = i ? w_ptr + (w_size1 + (i - 1) * w_size2) * D : w_ptr;
    DType* dw_cur_ptr = i ? dw_ptr + (w_size1 + (i - 1) * w_size2) * D : dw_ptr;
    DType* db_cur_ptr = db_ptr + i * b_size * D;
    DType* rs_cur_ptr = rs + i * r_size;
    DType* dhy_cur_ptr = dhy_ptr ? dhy_ptr + i * cell_size * D : NULL;
    DType* dcy_cur_ptr = dcy_ptr ? dcy_ptr + i * cell_size * D : NULL;
    Tensor<cpu, 3, DType> y(rs_cur_ptr + y_offset, Shape3(T, N, H * D));
    Tensor<cpu, 3, DType> dy(dy_ptr, Shape3(T, N, H * D));
    // Layer input: the layer below's y (from its reserve block) or the
    // network input for layer 0; dx goes to scratch except for layer 0.
    Tensor<cpu, 2, DType> x(i ? y.dptr_ - r_size : x_ptr, Shape2(T * N, input_size));
    Tensor<cpu, 2, DType> dx(i ? dy_tmp_ptr : dx_ptr, Shape2(T * N, input_size));
    // Forward direction.
    LstmBackwardSingleLayer<DType>(ws, rs_cur_ptr, false, T, N, input_size, H,
                                   x, hx[idx], cx[idx], y, dy, dx, dhx[idx], dcx[idx],
                                   dhy_cur_ptr, dcy_cur_ptr, w_cur_ptr, dw_cur_ptr, db_cur_ptr);
    if (D == 2) {
      // Reverse direction accumulates into the same dx (beta1 inside).
      w_cur_ptr += w_size;
      dw_cur_ptr += w_size;
      db_cur_ptr += b_size;
      ++idx;
      dhy_cur_ptr = dhy_ptr ? dhy_cur_ptr + cell_size : NULL;
      dcy_cur_ptr = dcy_ptr ? dcy_cur_ptr + cell_size : NULL;
      LstmBackwardSingleLayer<DType>(ws, rs_cur_ptr, true, T, N, input_size, H,
                                     x, hx[idx], cx[idx], y, dy, dx, dhx[idx], dcx[idx],
                                     dhy_cur_ptr, dcy_cur_ptr, w_cur_ptr, dw_cur_ptr, db_cur_ptr);
    }
    // This layer's input gradient is the next (lower) layer's output gradient.
    dy_ptr = dx.dptr_;
  }
}
#endif // MXNET_OPERATOR_RNN_IMPL_H_
|
instruments.h | /*
* This file is part of Quantum++.
*
* MIT License
*
* Copyright (c) 2013 - 2019 Vlad Gheorghiu (vgheorgh@gmail.com)
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/**
* \file instruments.h
* \brief Measurement functions
*/
#ifndef INSTRUMENTS_H_
#define INSTRUMENTS_H_
namespace qpp {
/**
* \brief Generalized inner product
*
* \param phi Column vector Eigen expression
* \param psi Column vector Eigen expression
* \param subsys Subsystem indexes over which \a phi is defined
* \param dims Dimensions of the multi-partite system
* \return Inner product \f$\langle \phi_{subsys}|\psi\rangle\f$, as a scalar or
* column vector over the remaining Hilbert space
*/
template <typename Derived>
dyn_col_vect<typename Derived::Scalar>
ip(const Eigen::MatrixBase<Derived>& phi, const Eigen::MatrixBase<Derived>& psi,
   const std::vector<idx>& subsys, const std::vector<idx>& dims) {
    const dyn_col_vect<typename Derived::Scalar>& rphi = phi.derived();
    const dyn_col_vect<typename Derived::Scalar>& rpsi = psi.derived();

    // EXCEPTION CHECKS

    // check zero-size
    if (!internal::check_nonzero_size(rphi))
        throw exception::ZeroSize("qpp::ip()");

    // check zero-size
    if (!internal::check_nonzero_size(rpsi))
        throw exception::ZeroSize("qpp::ip()");

    // check column vector
    if (!internal::check_cvector(rphi))
        throw exception::MatrixNotCvector("qpp::ip()");

    // check column vector
    if (!internal::check_cvector(rpsi))
        throw exception::MatrixNotCvector("qpp::ip()");

    // check that dims is a valid dimension vector
    if (!internal::check_dims(dims))
        throw exception::DimsInvalid("qpp::ip()");

    // check that subsys are valid w.r.t. dims
    if (!internal::check_subsys_match_dims(subsys, dims))
        throw exception::SubsysMismatchDims("qpp::ip()");

    // check that dims match psi column vector
    if (!internal::check_dims_match_cvect(dims, rpsi))
        throw exception::DimsMismatchCvector("qpp::ip()");

    // check that subsys match phi column vector
    std::vector<idx> subsys_dims(subsys.size());
    for (idx i = 0; i < subsys.size(); ++i)
        subsys_dims[i] = dims[subsys[i]];
    if (!internal::check_dims_match_cvect(subsys_dims, rphi))
        throw exception::DimsMismatchCvector("qpp::ip()");
    // END EXCEPTION CHECKS

    // Total dimensions of the measured subsystems (Dsubsys), the full space
    // (D) and the remaining subsystems (Dsubsys_bar); the result lives in the
    // Dsubsys_bar-dimensional remaining space.
    idx Dsubsys = prod(std::begin(subsys_dims), std::end(subsys_dims));
    idx D = static_cast<idx>(rpsi.rows());
    idx Dsubsys_bar = D / Dsubsys;

    idx n = dims.size();
    idx n_subsys = subsys.size();
    idx n_subsys_bar = n - n_subsys;

    // C-style arrays (capacity maxn) used by the multi-index helpers.
    idx Cdims[maxn];
    idx Csubsys[maxn];
    idx Cdimssubsys[maxn];
    idx Csubsys_bar[maxn];
    idx Cdimssubsys_bar[maxn];

    std::vector<idx> subsys_bar = complement(subsys, n);
    std::copy(std::begin(subsys_bar), std::end(subsys_bar),
              std::begin(Csubsys_bar));

    for (idx i = 0; i < n; ++i) {
        Cdims[i] = dims[i];
    }
    for (idx i = 0; i < n_subsys; ++i) {
        Csubsys[i] = subsys[i];
        Cdimssubsys[i] = dims[subsys[i]];
    }
    for (idx i = 0; i < n_subsys_bar; ++i) {
        Cdimssubsys_bar[i] = dims[subsys_bar[i]];
    }

    // worker(b) computes component b of the result: the sum over the subsys
    // basis index a of conj(phi[a]) * psi[combined index of (a, b)].
    auto worker = [&](idx b) noexcept->typename Derived::Scalar {
        idx Cmidxrow[maxn];
        idx Cmidxrowsubsys[maxn];
        idx Cmidxcolsubsys_bar[maxn];

        /* get the col multi-indexes of the complement */
        internal::n2multiidx(b, n_subsys_bar, Cdimssubsys_bar,
                             Cmidxcolsubsys_bar);
        /* write it in the global row multi-index */
        for (idx k = 0; k < n_subsys_bar; ++k) {
            Cmidxrow[Csubsys_bar[k]] = Cmidxcolsubsys_bar[k];
        }
        typename Derived::Scalar result = 0;
        for (idx a = 0; a < Dsubsys; ++a) {
            /* get the row multi-indexes of the subsys */
            internal::n2multiidx(a, n_subsys, Cdimssubsys, Cmidxrowsubsys);
            /* write it in the global row multi-index */
            for (idx k = 0; k < n_subsys; ++k) {
                Cmidxrow[Csubsys[k]] = Cmidxrowsubsys[k];
            }
            // compute the row index
            idx i = internal::multiidx2n(Cmidxrow, n, Cdims);
            result += std::conj(rphi(a)) * rpsi(i);
        }

        return result;
    }; /* end worker */

    dyn_col_vect<typename Derived::Scalar> result(Dsubsys_bar);
// Components are independent, so they can be computed in parallel.
#ifdef WITH_OPENMP_
#pragma omp parallel for
#endif // WITH_OPENMP_
    for (idx m = 0; m < Dsubsys_bar; ++m)
        result(m) = worker(m);

    return result;
}
/**
* \brief Generalized inner product
*
* \param phi Column vector Eigen expression
* \param psi Column vector Eigen expression
* \param subsys Subsystem indexes over which \a phi is defined
* \param d Subsystem dimensions
* \return Inner product \f$\langle \phi_{subsys}|\psi\rangle\f$, as a scalar or
* column vector over the remaining Hilbert space
*/
template <typename Derived>
dyn_col_vect<typename Derived::Scalar>
ip(const Eigen::MatrixBase<Derived>& phi, const Eigen::MatrixBase<Derived>& psi,
   const std::vector<idx>& subsys, idx d = 2) {
    const dyn_col_vect<typename Derived::Scalar>& rpsi = psi.derived();

    // EXCEPTION CHECKS

    // check zero-size (phi and the rest of the inputs are fully validated by
    // the general overload we delegate to below)
    if (!internal::check_nonzero_size(rpsi))
        throw exception::ZeroSize("qpp::ip()");

    // check valid dims
    if (d < 2)
        throw exception::DimsInvalid("qpp::ip()");
    // END EXCEPTION CHECKS

    // number of subsystems of psi, assuming a uniform local dimension d
    idx n = internal::get_num_subsys(static_cast<idx>(rpsi.rows()), d);
    std::vector<idx> dims(n, d); // local dimensions vector

    // delegate to the general overload (removed the unused local `rphi`)
    return ip(phi, psi, subsys, dims);
}
// full measurements
/**
* \brief Measures the state vector or density operator \a A using the set of
* Kraus operators \a Ks
*
* \param A Eigen expression
* \param Ks Set of Kraus operators
* \return Tuple of: 1. Result of the measurement, 2. Vector of outcome
* probabilities, and 3. Vector of post-measurement normalized states
*/
template <typename Derived>
std::tuple<idx, std::vector<double>, std::vector<cmat>>
measure(const Eigen::MatrixBase<Derived>& A, const std::vector<cmat>& Ks) {
    const dyn_mat<typename Derived::Scalar>& rA = A.derived();

    // EXCEPTION CHECKS

    // check zero-size
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::measure()");

    // check the Kraus operators: non-empty, square, and all of the same
    // dimension as the state
    if (Ks.empty())
        throw exception::ZeroSize("qpp::measure()");
    if (!internal::check_square_mat(Ks[0]))
        throw exception::MatrixNotSquare("qpp::measure()");
    if (Ks[0].rows() != rA.rows())
        throw exception::DimsMismatchMatrix("qpp::measure()");
    for (auto&& elem : Ks)
        if (elem.rows() != Ks[0].rows() || elem.cols() != Ks[0].rows())
            throw exception::DimsNotEqual("qpp::measure()");
    // END EXCEPTION CHECKS

    // probabilities
    std::vector<double> prob(Ks.size());
    // resulting states
    std::vector<cmat> outstates(Ks.size());

    //************ density matrix ************//
    if (internal::check_square_mat(rA)) // square matrix
    {
        for (idx i = 0; i < Ks.size(); ++i) {
            outstates[i] = cmat::Zero(rA.rows(), rA.rows());
            // post-measurement state K_i * rho * K_i^dagger, probability tr(.)
            cmat tmp = Ks[i] * rA * adjoint(Ks[i]); // un-normalized;
            prob[i] = std::abs(trace(tmp)); // probability
            if (prob[i] > 0)
                outstates[i] = tmp / prob[i]; // normalized
        }
    }
    //************ ket ************//
    else if (internal::check_cvector(rA)) // column vector
    {
        for (idx i = 0; i < Ks.size(); ++i) {
            outstates[i] = ket::Zero(rA.rows());
            // post-measurement state K_i |psi>, probability ||K_i |psi>||^2
            ket tmp = Ks[i] * rA; // un-normalized;
            // probability
            prob[i] = std::pow(norm(tmp), 2);
            if (prob[i] > 0)
                outstates[i] = tmp / std::sqrt(prob[i]); // normalized
        }
    } else
        throw exception::MatrixNotSquareNorCvector("qpp::measure()");

    // sample the measurement outcome from the probability distribution
    std::discrete_distribution<idx> dd(std::begin(prob), std::end(prob));
    auto& gen =
#ifdef NO_THREAD_LOCAL_
        RandomDevices::get_instance().get_prng();
#else
        RandomDevices::get_thread_local_instance().get_prng();
#endif
    idx result = dd(gen);

    return std::make_tuple(result, prob, outstates);
}
// std::initializer_list overload, avoids ambiguity for 2-element lists, see
// http://stackoverflow.com
// /questions/26750039/ambiguity-when-using-initializer-list-as-parameter
/**
* \brief Measures the state vector or density matrix \a A using the set of
* Kraus operators \a Ks
*
* \param A Eigen expression
* \param Ks Set of Kraus operators
* \return Tuple of: 1. Result of the measurement, 2. Vector of outcome
* probabilities, and 3. Vector of post-measurement normalized states
*/
template <typename Derived>
std::tuple<idx, std::vector<double>, std::vector<cmat>>
measure(const Eigen::MatrixBase<Derived>& A,
        const std::initializer_list<cmat>& Ks) {
    // Materialize the initializer list and forward to the std::vector overload
    std::vector<cmat> kraus_ops(Ks);

    return measure(A, kraus_ops);
}
/**
* \brief Measures the state vector or density matrix \a A in the orthonormal
* basis specified by the unitary matrix \a U
*
* \param A Eigen expression
* \param U Unitary matrix whose columns represent the measurement basis vectors
* \return Tuple of: 1. Result of the measurement, 2. Vector of outcome
* probabilities, and 3. Vector of post-measurement normalized states
*/
template <typename Derived>
std::tuple<idx, std::vector<double>, std::vector<cmat>>
measure(const Eigen::MatrixBase<Derived>& A, const cmat& U) {
    const dyn_mat<typename Derived::Scalar>& rA = A.derived();

    // EXCEPTION CHECKS

    // check zero-size
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::measure()");

    // check the unitary basis matrix U
    if (!internal::check_nonzero_size(U))
        throw exception::ZeroSize("qpp::measure()");
    if (!internal::check_square_mat(U))
        throw exception::MatrixNotSquare("qpp::measure()");
    if (U.rows() != rA.rows())
        throw exception::DimsMismatchMatrix("qpp::measure()");
    // END EXCEPTION CHECKS

    // Build the rank-1 projectors |u_i><u_i| onto the columns of U and
    // delegate to the Kraus-operator overload.
    std::vector<cmat> Ks(U.rows());
    for (idx i = 0; i < static_cast<idx>(U.rows()); ++i)
        Ks[i] = U.col(i) * adjoint(U.col(i));

    return measure(rA, Ks);
}
// partial measurements
/**
* \brief Measures the part \a subsys of the multi-partite state vector or
* density matrix \a A using the set of Kraus operators \a Ks
* \see qpp::measure_seq()
*
* \note The dimension of all \a Ks must match the dimension of \a target. If
* \a destructive is set to true (by default), the measurement is destructive,
* i.e. the measured subsystems are traced away.
*
* \param A Eigen expression
* \param Ks Set of Kraus operators
* \param target Subsystem indexes that are measured
* \param dims Dimensions of the multi-partite system
* \param destructive Destructive measurement, true by default
* \return Tuple of: 1. Result of the measurement, 2. Vector of outcome
* probabilities, and 3. Vector of post-measurement normalized states
*/
template <typename Derived>
std::tuple<idx, std::vector<double>, std::vector<cmat>>
measure(const Eigen::MatrixBase<Derived>& A, const std::vector<cmat>& Ks,
        const std::vector<idx>& target, const std::vector<idx>& dims,
        bool destructive = true) {
    const typename Eigen::MatrixBase<Derived>::EvalReturnType& rA = A.derived();

    // EXCEPTION CHECKS

    // check zero-size
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::measure()");

    // check that dimension is valid
    if (!internal::check_dims(dims))
        throw exception::DimsInvalid("qpp::measure()");

    // check that target is valid w.r.t. dims
    if (!internal::check_subsys_match_dims(target, dims))
        throw exception::SubsysMismatchDims("qpp::measure()");

    // check valid state and matching dimensions
    if (internal::check_cvector(rA)) {
        if (!internal::check_dims_match_cvect(dims, rA))
            throw exception::DimsMismatchCvector("qpp::measure()");
    } else if (internal::check_square_mat(rA)) {
        if (!internal::check_dims_match_mat(dims, rA))
            throw exception::DimsMismatchMatrix("qpp::measure()");
    } else
        throw exception::MatrixNotSquareNorCvector("qpp::measure()");

    // dimensions of the measured (target) subsystems
    std::vector<idx> subsys_dims(target.size());
    for (idx i = 0; i < target.size(); ++i)
        subsys_dims[i] = dims[target[i]];

    // D: full space; Dsubsys: measured part; Dsubsys_bar: remaining part
    idx D = prod(std::begin(dims), std::end(dims));
    idx Dsubsys = prod(std::begin(subsys_dims), std::end(subsys_dims));
    idx Dsubsys_bar = D / Dsubsys;

    // check the Kraus operators: non-empty, square, dimension Dsubsys
    if (Ks.empty())
        throw exception::ZeroSize("qpp::measure()");
    if (!internal::check_square_mat(Ks[0]))
        throw exception::MatrixNotSquare("qpp::measure()");
    if (Dsubsys != static_cast<idx>(Ks[0].rows()))
        throw exception::DimsMismatchMatrix("qpp::measure()");
    for (auto&& elem : Ks)
        if (elem.rows() != Ks[0].rows() || elem.cols() != Ks[0].rows())
            throw exception::DimsNotEqual("qpp::measure()");
    // END EXCEPTION CHECKS

    // probabilities
    std::vector<double> prob(Ks.size());
    // resulting states: destructive measurements trace out the target, so the
    // post-measurement states live in the smaller Dsubsys_bar space
    std::vector<cmat> outstates;

    if (destructive)
        outstates.resize(Ks.size(), cmat::Zero(Dsubsys_bar, Dsubsys_bar));
    else
        outstates.resize(Ks.size(), cmat::Zero(D, D));

    //************ ket ************//
    if (internal::check_cvector(rA)) // column vector
    {
        for (idx i = 0; i < Ks.size(); ++i) {
            // apply K_i only on the target subsystems
            ket tmp = apply(rA, Ks[i], target, dims);
            prob[i] = std::pow(norm(tmp), 2);
            if (prob[i] > 0) {
                // normalized output state
                // corresponding to measurement result i
                tmp /= std::sqrt(prob[i]);
                if (destructive)
                    outstates[i] = ptrace(tmp, target, dims);
                else
                    outstates[i] = tmp;
            }
        }
    }
    //************ density matrix ************//
    else // square matrix
    {
        for (idx i = 0; i < Ks.size(); ++i) {
            // apply K_i only on the target subsystems
            cmat tmp = apply(rA, Ks[i], target, dims);
            if (destructive)
                tmp = ptrace(tmp, target, dims);
            prob[i] = std::abs(trace(tmp)); // probability
            if (prob[i] > 0) {
                // normalized output state
                // corresponding to measurement result i
                outstates[i] = tmp / prob[i];
            }
        }
    }

    // sample the measurement outcome from the probability distribution
    std::discrete_distribution<idx> dd(std::begin(prob), std::end(prob));
    auto& gen =
#ifdef NO_THREAD_LOCAL_
        RandomDevices::get_instance().get_prng();
#else
        RandomDevices::get_thread_local_instance().get_prng();
#endif
    idx result = dd(gen);

    return std::make_tuple(result, prob, outstates);
}
// std::initializer_list overload, avoids ambiguity for 2-element lists, see
// http://stackoverflow.com
// /questions/26750039/ambiguity-when-using-initializer-list-as-parameter
/**
* \brief Measures the part \a target of the multi-partite state vector or
* density matrix \a A using the set of Kraus operators \a Ks
* \see qpp::measure_seq()
*
* \note The dimension of all \a Ks must match the dimension of \a target. If
* \a destructive is set to true (by default), the measurement is destructive,
* i.e. the measured subsystems are traced away.
*
* \param A Eigen expression
* \param Ks Set of Kraus operators
* \param target Subsystem indexes that are measured
* \param dims Dimensions of the multi-partite system
* \param destructive Destructive measurement, true by default
* \return Tuple of: 1. Result of the measurement, 2. Vector of outcome
* probabilities, and 3. Vector of post-measurement normalized states
*/
template <typename Derived>
std::tuple<idx, std::vector<double>, std::vector<cmat>>
measure(const Eigen::MatrixBase<Derived>& A,
        const std::initializer_list<cmat>& Ks, const std::vector<idx>& target,
        const std::vector<idx>& dims, bool destructive = true) {
    // materialize the initializer list and forward to the vector overload
    std::vector<cmat> kraus_ops{Ks};
    return measure(A, kraus_ops, target, dims, destructive);
}
/**
* \brief Measures the part \a target of the multi-partite state vector or
* density matrix \a A using the set of Kraus operators \a Ks
* \see qpp::measure_seq()
*
* \note The dimension of all \a Ks must match the dimension of \a target. If
* \a destructive is set to true (by default), the measurement is destructive,
* i.e. the measured subsystems are traced away.
*
* \param A Eigen expression
* \param Ks Set of Kraus operators
* \param target Subsystem indexes that are measured
* \param d Subsystem dimensions
* \param destructive Destructive measurement, true by default
* \return Tuple of: 1. Result of the measurement, 2. Vector of outcome
* probabilities, and 3. Vector of post-measurement normalized states
*/
template <typename Derived>
std::tuple<idx, std::vector<double>, std::vector<cmat>>
measure(const Eigen::MatrixBase<Derived>& A, const std::vector<cmat>& Ks,
        const std::vector<idx>& target, idx d = 2, bool destructive = true) {
    // Convenience overload: every subsystem has the same dimension d.
    const typename Eigen::MatrixBase<Derived>::EvalReturnType& rA = A.derived();
    // EXCEPTION CHECKS
    // the state must be non-empty
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::measure()");
    // a local dimension below 2 is meaningless
    if (d < 2)
        throw exception::DimsInvalid("qpp::measure()");
    // END EXCEPTION CHECKS
    // build the uniform local-dimension vector and delegate
    std::vector<idx> dims(
        internal::get_num_subsys(static_cast<idx>(rA.rows()), d), d);
    return measure(rA, Ks, target, dims, destructive);
}
// std::initializer_list overload, avoids ambiguity for 2-element lists, see
// http://stackoverflow.com
// /questions/26750039/ambiguity-when-using-initializer-list-as-parameter
/**
* \brief Measures the part \a target of the multi-partite state vector or
* density matrix \a A using the set of Kraus operators \a Ks
* \see qpp::measure_seq()
*
* \note The dimension of all \a Ks must match the dimension of \a target. If
* \a destructive is set to true (by default), the measurement is destructive,
* i.e. the measured subsystems are traced away.
*
* \param A Eigen expression
* \param Ks Set of Kraus operators
* \param target Subsystem indexes that are measured
* \param d Subsystem dimensions
* \param destructive Destructive measurement, true by default
* \return Tuple of: 1. Result of the measurement, 2. Vector of outcome
* probabilities, and 3. Vector of post-measurement normalized states
*/
template <typename Derived>
std::tuple<idx, std::vector<double>, std::vector<cmat>>
measure(const Eigen::MatrixBase<Derived>& A,
        const std::initializer_list<cmat>& Ks, const std::vector<idx>& target,
        idx d = 2, bool destructive = true) {
    // materialize the initializer list and forward to the vector overload
    std::vector<cmat> kraus_ops{Ks};
    return measure(A, kraus_ops, target, d, destructive);
}
/**
* \brief Measures the part \a target of the multi-partite state vector or
* density matrix \a A in the orthonormal basis or rank-1 projectors specified
* by the columns of the matrix \a V
* \see qpp::measure_seq()
*
* \note The dimension of \a V must match the dimension of \a target. If
* \a destructive is set to true (by default), the measurement is destructive,
* i.e. the measured subsystems are traced away.
*
* \param A Eigen expression
* \param V Matrix whose columns represent the measurement basis vectors or the
* bra parts of the rank-1 projectors
* \param target Subsystem indexes that are measured
* \param dims Dimensions of the multi-partite system
* \param destructive Destructive measurement, true by default
* \return Tuple of: 1. Result of the measurement, 2. Vector of outcome
* probabilities, and 3. Vector of post-measurement normalized states
*/
template <typename Derived>
std::tuple<idx, std::vector<double>, std::vector<cmat>>
measure(const Eigen::MatrixBase<Derived>& A, const cmat& V,
        const std::vector<idx>& target, const std::vector<idx>& dims,
        bool destructive = true) {
    // Measures the subsystems `target` of `A` in the basis (or rank-1
    // POVM) given by the columns of V. For kets the two branches below
    // compute the outcome amplitudes directly; for density matrices the
    // columns of V are turned into rank-1 Kraus operators and the Kraus
    // overload is reused.
    const typename Eigen::MatrixBase<Derived>::EvalReturnType& rA = A.derived();
    // EXCEPTION CHECKS
    // check zero-size
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::measure()");
    // check that dimension is valid
    if (!internal::check_dims(dims))
        throw exception::DimsInvalid("qpp::measure()");
    // check that target is valid w.r.t. dims
    if (!internal::check_subsys_match_dims(target, dims))
        throw exception::SubsysMismatchDims("qpp::measure()");
    // check valid state and matching dimensions
    if (internal::check_cvector(rA)) {
        if (!internal::check_dims_match_cvect(dims, rA))
            throw exception::DimsMismatchCvector("qpp::measure()");
    } else if (internal::check_square_mat(rA)) {
        if (!internal::check_dims_match_mat(dims, rA))
            throw exception::DimsMismatchMatrix("qpp::measure()");
    } else
        throw exception::MatrixNotSquareNorCvector("qpp::measure()");
    // dimensions of the measured subsystems, in target order
    std::vector<idx> subsys_dims(target.size());
    for (idx i = 0; i < target.size(); ++i)
        subsys_dims[i] = dims[target[i]];
    idx Dsubsys = prod(std::begin(subsys_dims), std::end(subsys_dims));
    // check the matrix V
    if (!internal::check_nonzero_size(V))
        throw exception::ZeroSize("qpp::measure()");
    // V's rows must span the measured part
    if (Dsubsys != static_cast<idx>(V.rows()))
        throw exception::DimsMismatchMatrix("qpp::measure()");
    // END EXCEPTION CHECKS
    // number of basis elements or number of rank-1 projectors
    idx M = static_cast<idx>(V.cols());
    //************ ket ************//
    if (internal::check_cvector(rA)) {
        const ket& rpsi = A.derived();
        std::vector<double> prob(M);    // probabilities
        std::vector<cmat> outstates(M); // resulting states
#ifdef WITH_OPENMP_
#pragma omp parallel for
#endif // WITH_OPENMP_
        // each iteration writes a distinct outstates[i], so the loop is
        // safe to run in parallel
        for (idx i = 0; i < M; ++i) {
            if (destructive)
                // generalized inner product <V_i| psi> over the target:
                // un-normalized state of the unmeasured part
                outstates[i] =
                    ip(static_cast<const ket&>(V.col(i)), rpsi, target, dims);
            else
                // project onto |V_i><V_i| on the target, keep full state
                outstates[i] = apply(rpsi, prj(V.col(i)), target, dims);
        }
        for (idx i = 0; i < M; ++i) {
            // p_i is the squared norm of the un-normalized outcome state
            double tmp = norm(outstates[i]);
            prob[i] = tmp * tmp;
            if (prob[i] > 0) {
                // normalized output state
                // corresponding to measurement result m
                outstates[i] /= tmp;
            }
        }
        // sample from the probability distribution
        std::discrete_distribution<idx> dd(std::begin(prob), std::end(prob));
        auto& gen =
#ifdef NO_THREAD_LOCAL_
            RandomDevices::get_instance().get_prng();
#else
            RandomDevices::get_thread_local_instance().get_prng();
#endif
        idx result = dd(gen);
        return std::make_tuple(result, prob, outstates);
    }
    //************ density matrix ************//
    else {
        // build the rank-1 Kraus operators |V_i><V_i| and delegate
        std::vector<cmat> Ks(M);
        for (idx i = 0; i < M; ++i)
            Ks[i] = V.col(i) * adjoint(V.col(i));
        return measure(rA, Ks, target, dims, destructive);
    }
}
/**
* \brief Measures the part \a target of the multi-partite state vector or
* density matrix \a A in the orthonormal basis or rank-1 projectors specified
* by the columns of the matrix \a V
* \see qpp::measure_seq()
*
* \note The dimension of \a V must match the dimension of \a target. If
* \a destructive is set to true (by default), the measurement is destructive,
* i.e. the measured subsystems are traced away.
*
* \param A Eigen expression
* \param V Matrix whose columns represent the measurement basis vectors or the
* bra parts of the rank-1 projectors
* \param target Subsystem indexes that are measured
* \param d Subsystem dimensions
* \param destructive Destructive measurement, true by default
* \return Tuple of: 1. Result of the measurement, 2. Vector of outcome
* probabilities, and 3. Vector of post-measurement normalized states
*/
template <typename Derived>
std::tuple<idx, std::vector<double>, std::vector<cmat>>
measure(const Eigen::MatrixBase<Derived>& A, const cmat& V,
        const std::vector<idx>& target, idx d = 2, bool destructive = true) {
    // Convenience overload: every subsystem has the same dimension d.
    const typename Eigen::MatrixBase<Derived>::EvalReturnType& rA = A.derived();
    // EXCEPTION CHECKS
    // the state must be non-empty
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::measure()");
    // a local dimension below 2 is meaningless
    if (d < 2)
        throw exception::DimsInvalid("qpp::measure()");
    // END EXCEPTION CHECKS
    // build the uniform local-dimension vector and delegate
    std::vector<idx> dims(
        internal::get_num_subsys(static_cast<idx>(rA.rows()), d), d);
    return measure(rA, V, target, dims, destructive);
}
/**
* \brief Sequentially measures the part \a target of the multi-partite state
* vector or density matrix \a A in the computational basis
* \see qpp::measure()
*
* \note If \a destructive is set to true (by default), the measurement is
* destructive, i.e. the measured subsystems are traced away.
*
* \param A Eigen expression
* \param target Subsystem indexes that are measured
* \param dims Dimensions of the multi-partite system
* \param destructive Destructive measurement, true by default
* \return Tuple of: 1. Vector of outcome results of the
* measurement (ordered in increasing order with respect to \a target, i.e.
* first measurement result corresponds to the subsystem with the smallest
* index), 2. Outcome probability, and 3. Post-measurement normalized state
*/
template <typename Derived>
std::tuple<std::vector<idx>, double, cmat>
measure_seq(const Eigen::MatrixBase<Derived>& A, std::vector<idx> target,
            std::vector<idx> dims, bool destructive = true) {
    // Sequentially measures the subsystems `target` of `A` in the
    // computational basis by repeated calls to measure() with an identity
    // "basis". `target` and `dims` are taken by value because the loop
    // below consumes them.
    // typename std::remove_const<
    //    typename Eigen::MatrixBase<Derived>::EvalReturnType
    // >::type rA = A.derived();
    // rA must be a mutable copy: it is overwritten with the
    // post-measurement state after every single-subsystem measurement
    dyn_mat<typename Derived::Scalar> rA = A.derived();
    // EXCEPTION CHECKS
    // check zero-size
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::measure_seq()");
    // check that dimension is valid
    if (!internal::check_dims(dims))
        throw exception::DimsInvalid("qpp::measure_seq()");
    // check valid state and matching dimensions
    if (internal::check_cvector(rA)) {
        if (!internal::check_dims_match_cvect(dims, rA))
            throw exception::DimsMismatchCvector("qpp::measure_seq()");
    } else if (internal::check_square_mat(rA)) {
        if (!internal::check_dims_match_mat(dims, rA))
            throw exception::DimsMismatchMatrix("qpp::measure_seq()");
    } else
        throw exception::MatrixNotSquareNorCvector("qpp::measure_seq()");
    // check that target is valid w.r.t. dims
    if (!internal::check_subsys_match_dims(target, dims))
        throw exception::SubsysMismatchDims("qpp::measure_seq()");
    // END EXCEPTION CHECKS
    std::vector<idx> result;
    double prob = 1;
    // sort target in decreasing order,
    // the order of measurements does not matter
    // (measuring the largest index first keeps the remaining indexes
    // valid when a destructive measurement erases a dims entry)
    std::sort(std::begin(target), std::end(target), std::greater<idx>{});
    //************ density matrix or column vector ************//
    while (target.size() > 0) {
        // measure subsystem target[0] in the computational basis: the
        // identity matrix's columns are the computational basis vectors
        auto tmp = measure(rA, Gates::get_instance().Id(dims[target[0]]),
                           {target[0]}, dims, destructive);
        result.emplace_back(std::get<0>(tmp));
        // accumulate the joint probability of the sampled outcomes
        prob *= std::get<1>(tmp)[std::get<0>(tmp)];
        // continue with the post-measurement state of the sampled outcome
        rA = std::get<2>(tmp)[std::get<0>(tmp)];
        if (destructive) {
            // remove the subsystem
            dims.erase(std::next(std::begin(dims), target[0]));
        }
        target.erase(std::begin(target));
    }
    // order result in increasing order with respect to target
    std::reverse(std::begin(result), std::end(result));
    return std::make_tuple(result, prob, rA);
}
/**
* \brief Sequentially measures the part \a target of the multi-partite state
* vector or density matrix \a A in the computational basis
* \see qpp::measure()
*
* \note If \a destructive is set to true (by default), the measurement is
* destructive, i.e. the measured subsystems are traced away.
*
* \param A Eigen expression
* \param target Subsystem indexes that are measured
* \param d Subsystem dimensions
* \param destructive Destructive measurement, true by default
* \return Tuple of: 1. Vector of outcome results of the
* measurement (ordered in increasing order with respect to \a target, i.e.
* first measurement result corresponds to the subsystem with the smallest
* index), 2. Outcome probability, and 3. Post-measurement normalized state
*/
template <typename Derived>
std::tuple<std::vector<idx>, double, cmat>
measure_seq(const Eigen::MatrixBase<Derived>& A, std::vector<idx> target,
            idx d = 2, bool destructive = true) {
    // Convenience overload: every subsystem has the same dimension d.
    const typename Eigen::MatrixBase<Derived>::EvalReturnType& rA = A.derived();
    // EXCEPTION CHECKS
    // the state must be non-empty
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::measure_seq()");
    // a local dimension below 2 is meaningless
    if (d < 2)
        throw exception::DimsInvalid("qpp::measure_seq()");
    // END EXCEPTION CHECKS
    // build the uniform local-dimension vector and delegate
    std::vector<idx> dims(
        internal::get_num_subsys(static_cast<idx>(rA.rows()), d), d);
    return measure_seq(rA, target, dims, destructive);
}
/**
* \brief Resets qudits from the multi-partite state vector or density matrix
* \a A by performing a non-destructive measurement in the computational basis
* on the \a target qudits and discarding the measurement results, followed by
* shifting them back to the \f$|0\cdots 0\rangle\f$ state
*
* \param A Eigen expression
* \param target Target qudit indexes that are reset
* \param dims Dimensions of the multi-partite system
* \return Reset quantum state
*/
template <typename Derived>
dyn_mat<typename Derived::Scalar> reset(const Eigen::MatrixBase<Derived>& A,
                                        std::vector<idx> target,
                                        std::vector<idx> dims) {
    // Resets the qudits in `target` to |0>: performs a non-destructive
    // computational-basis measurement on them (discarding the outcome),
    // then applies a power of the shift gate Xd that maps each measured
    // qudit back to |0>.
    //
    // \param A Eigen expression (state vector or density matrix)
    // \param target Target qudit indexes that are reset
    // \param dims Dimensions of the multi-partite system
    // \return Reset quantum state
    const typename Eigen::MatrixBase<Derived>::EvalReturnType& rA = A.derived();
    // EXCEPTION CHECKS
    // check zero-size
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::reset()");
    // check that dimension is valid
    if (!internal::check_dims(dims))
        throw exception::DimsInvalid("qpp::reset()");
    // check valid state and matching dimensions
    if (internal::check_cvector(rA)) {
        if (!internal::check_dims_match_cvect(dims, rA))
            throw exception::DimsMismatchCvector("qpp::reset()");
    } else if (internal::check_square_mat(rA)) {
        if (!internal::check_dims_match_mat(dims, rA))
            throw exception::DimsMismatchMatrix("qpp::reset()");
    } else
        throw exception::MatrixNotSquareNorCvector("qpp::reset()");
    // check that target is valid w.r.t. dims
    if (!internal::check_subsys_match_dims(target, dims))
        throw exception::SubsysMismatchDims("qpp::reset()");
    // END EXCEPTION CHECKS

    // measure_seq() reports outcomes in increasing order of target, so
    // sort target ascending to pair resZ[i] with the correct subsystem
    std::sort(std::begin(target), std::end(target));

    dyn_mat<typename Derived::Scalar> result;
    std::vector<idx> resZ;
    std::tie(resZ, std::ignore, result) = measure_seq(rA, target, dims, false);
    for (idx i = 0; i < target.size(); ++i) {
        // Shift outcome resZ[i] on subsystem target[i] back to 0.
        // BUGFIX: the correction gate must use the dimension of the
        // measured subsystem, dims[target[i]], not dims[i] (the previous
        // code was only correct when all local dimensions were equal).
        cmat correction = powm(Gates::get_instance().Xd(dims[target[i]]),
                               dims[target[i]] - resZ[i]);
        result = apply(result, correction, {target[i]}, dims);
    }
    return result;
}
/**
* \brief Resets qudits from the multi-partite state vector or density matrix
* \a A by performing a non-destructive measurement in the computational basis
* on the \a target qudits and discarding the measurement results, followed by
* shifting them back to the \f$|0\cdots 0\rangle\f$ state
*
* \param A Eigen expression
* \param target Target qudit indexes that are reset
* \param d Subsystem dimensions
* \return Reset quantum state
*/
template <typename Derived>
dyn_mat<typename Derived::Scalar> reset(const Eigen::MatrixBase<Derived>& A,
                                        std::vector<idx> target, idx d = 2) {
    // Convenience overload: every subsystem has the same dimension d.
    const typename Eigen::MatrixBase<Derived>::EvalReturnType& rA = A.derived();
    // EXCEPTION CHECKS
    // the state must be non-empty
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::reset()");
    // a local dimension below 2 is meaningless
    if (d < 2)
        throw exception::DimsInvalid("qpp::reset()");
    // END EXCEPTION CHECKS
    // build the uniform local-dimension vector and delegate
    std::vector<idx> dims(
        internal::get_num_subsys(static_cast<idx>(rA.rows()), d), d);
    return reset(rA, target, dims);
}
/**
* \brief Discards qudits from the multi-partite state vector or density matrix
* \a A by performing a destructive measurement in the computational basis on
* the \a target qudits and discarding the measurement results
*
* \param A Eigen expression
* \param target Target qudit indexes that are discarded
* \param dims Dimensions of the multi-partite system
* \return Resulting quantum state
*/
template <typename Derived>
dyn_mat<typename Derived::Scalar> discard(const Eigen::MatrixBase<Derived>& A,
                                          std::vector<idx> target,
                                          std::vector<idx> dims) {
    // Discards the qudits in `target`: measures them destructively in the
    // computational basis and throws away the outcomes, keeping only the
    // post-measurement state of the remaining qudits.
    const typename Eigen::MatrixBase<Derived>::EvalReturnType& rState =
        A.derived();
    // EXCEPTION CHECKS
    // zero-sized state
    if (!internal::check_nonzero_size(rState))
        throw exception::ZeroSize("qpp::discard()");
    // invalid dimension vector
    if (!internal::check_dims(dims))
        throw exception::DimsInvalid("qpp::discard()");
    // state must be a ket or a square matrix whose size matches dims
    if (internal::check_cvector(rState)) {
        if (!internal::check_dims_match_cvect(dims, rState))
            throw exception::DimsMismatchCvector("qpp::discard()");
    } else if (internal::check_square_mat(rState)) {
        if (!internal::check_dims_match_mat(dims, rState))
            throw exception::DimsMismatchMatrix("qpp::discard()");
    } else
        throw exception::MatrixNotSquareNorCvector("qpp::discard()");
    // target must be a valid subsystem specification w.r.t. dims
    if (!internal::check_subsys_match_dims(target, dims))
        throw exception::SubsysMismatchDims("qpp::discard()");
    // END EXCEPTION CHECKS
    // destructive measurement; only the resulting state is kept
    dyn_mat<typename Derived::Scalar> out;
    std::tie(std::ignore, std::ignore, out) =
        measure_seq(rState, target, dims);
    return out;
}
/**
* \brief Discards qudits from the multi-partite state vector or density matrix
* \a A by performing a destructive measurement in the computational basis on
* the \a target qudits and discarding the measurement results
*
* \param A Eigen expression
* \param target Target qudit indexes that are discarded
* \param d Subsystem dimensions
* \return Resulting quantum state
*/
template <typename Derived>
dyn_mat<typename Derived::Scalar> discard(const Eigen::MatrixBase<Derived>& A,
                                          std::vector<idx> target, idx d = 2) {
    // Convenience overload: every subsystem has the same dimension d.
    const typename Eigen::MatrixBase<Derived>::EvalReturnType& rA = A.derived();
    // EXCEPTION CHECKS
    // the state must be non-empty
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::discard()");
    // a local dimension below 2 is meaningless
    if (d < 2)
        throw exception::DimsInvalid("qpp::discard()");
    // END EXCEPTION CHECKS
    // build the uniform local-dimension vector and delegate
    std::vector<idx> dims(
        internal::get_num_subsys(static_cast<idx>(rA.rows()), d), d);
    return discard(rA, target, dims);
}
} /* namespace qpp */
#endif /* INSTRUMENTS_H_ */
|
GB_unop__identity_uint32_uint8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint32_uint8)
// op(A') function: GB (_unop_tran__identity_uint32_uint8)
// C type: uint32_t
// A type: uint8_t
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = aij
// Kernel-configuration macros (generated; used by the functions below and
// by the included GB_unop_transpose.c template).

// A matrix entry type
#define GB_ATYPE \
    uint8_t

// C matrix entry type
#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

// access entry p of the output array Cx
#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    uint32_t z = (uint32_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    uint8_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    uint32_t z = (uint32_t) aij ; \
    Cx [pC] = z ; \
}

// true if operator is the identity op with no typecasting
// (0 here: the uint8_t -> uint32_t cast is always required)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_uint32_uint8)
(
    uint32_t *Cx,               // Cx and Ax may be aliased
    const uint8_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    // Applies Cx [p] = (uint32_t) Ax [p] over anz entries, in parallel
    // over nthreads. Returns GrB_NO_VALUE when this kernel is compiled
    // out via GB_DISABLE.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // dense/sparse case: every entry is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // (dead code here: the flag is 0 because a typecast is required)
        GB_memcpy (Cx, Ax, anz * sizeof (uint8_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint8_t aij = Ax [p] ;
            uint32_t z = (uint32_t) aij ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            uint8_t aij = Ax [p] ;
            uint32_t z = (uint32_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_uint32_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // C = identity (cast (A')): the shared transpose template expands
    // using the GB_* macros defined above. Returns GrB_NO_VALUE when
    // this kernel is compiled out via GB_DISABLE.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
integrator_sei.c | /**
* @file integrator_sei.c
* @brief Symplectic Epicycle Integrator (SEI).
* @author Hanno Rein <hanno@hanno-rein.de>
* @details This file implements the Symplectic Epicycle Integrator
* (SEI). The integrator is described in detail in Rein & Tremaine 2011.
* It solves epicyclic motion exactly and is therefore exact up to machine
 * precision in the limit of no perturbing forces. When perturbing forces
* are of order eps, then the error of the scheme is O(eps dt^3). It also
* makes use of two shear operators instead of a rotation to minimize
* systematic numerical round-off errors.
*
* @section LICENSE
* Copyright (c) 2011 Hanno Rein, Shangfei Liu
*
* This file is part of rebound.
*
* rebound is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* rebound is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with rebound. If not, see <http://www.gnu.org/licenses/>.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <math.h>
#include <time.h>
#include "rebound.h"
#include "particle.h"
#include "gravity.h"
#include "boundary.h"
#include "integrator.h"
#include "integrator_sei.h"
static void operator_H012(double dt, const struct reb_simulation_integrator_sei ri_sei, struct reb_particle* p);
static void operator_phi1(double dt, struct reb_particle* p);
void reb_integrator_sei_init(struct reb_simulation* const r){
    /**
     * Pre-calculates sin() and tan() needed for SEI.
     * The cached factors depend on OMEGA, OMEGAZ and the timestep dt;
     * lastdt records the dt they were computed for, so part1 can detect
     * a timestep change and recompute them.
     */
    r->ri_sei.sindt = sin(r->ri_sei.OMEGA*(-r->dt/2.));
    r->ri_sei.tandt = tan(r->ri_sei.OMEGA*(-r->dt/4.));
    r->ri_sei.sindtz = sin(r->ri_sei.OMEGAZ*(-r->dt/2.));
    r->ri_sei.tandtz = tan(r->ri_sei.OMEGAZ*(-r->dt/4.));
    r->ri_sei.lastdt = r->dt;
}
void reb_integrator_sei_part1(struct reb_simulation* const r){
    // First half of the SEI step: applies operator_H012 (the exact
    // epicyclic evolution) to every particle, then advances time by dt/2.
    r->gravity_ignore_terms = 0;
    const int N = r->N;
    struct reb_particle* const particles = r->particles;
    // OMEGAZ == -1 is the "unset" sentinel; default it to OMEGA
    if (r->ri_sei.OMEGAZ==-1){
        r->ri_sei.OMEGAZ=r->ri_sei.OMEGA;
    }
    // recompute cached trig factors if the timestep changed
    if (r->ri_sei.lastdt!=r->dt){
        reb_integrator_sei_init(r);
    }
    // local copy so OpenMP threads read a stable snapshot of the struct
    const struct reb_simulation_integrator_sei ri_sei = r->ri_sei;
#pragma omp parallel for schedule(guided)
    for (int i=0;i<N;i++){
        operator_H012(r->dt, ri_sei, &(particles[i]));
    }
    r->t+=r->dt/2.;
}
void reb_integrator_sei_part2(struct reb_simulation* r){
    // Second half of the SEI step: kick each particle with the computed
    // accelerations (operator_phi1), apply operator_H012 again, then
    // advance time by the remaining dt/2.
    const int N = r->N;
    struct reb_particle* const particles = r->particles;
    const struct reb_simulation_integrator_sei ri_sei = r->ri_sei;
#pragma omp parallel for schedule(guided)
    for (int i=0;i<N;i++){
        operator_phi1(r->dt, &(particles[i]));
        operator_H012(r->dt, ri_sei, &(particles[i]));
    }
    r->t+=r->dt/2.;
    r->dt_last_done = r->dt;
}
void reb_integrator_sei_synchronize(struct reb_simulation* r){
    // Do nothing. SEI keeps no unsynchronized internal particle state
    // between steps, so there is nothing to flush here.
}
void reb_integrator_sei_reset(struct reb_simulation* r){
    // Invalidate the cached trig factors; part1 compares lastdt against
    // dt and will call reb_integrator_sei_init() to recompute them.
    r->ri_sei.lastdt = 0;
}
/**
 * @brief This function evolves a particle under the unperturbed
 * Hamiltonian H0 exactly up to machine precision.
 * @param p reb_particle to evolve.
 * @param dt Timestep
 * @param ri_sei Integrator struct
 */
static void operator_H012(double dt, const struct reb_simulation_integrator_sei ri_sei, struct reb_particle* p){
    // Integrate vertical motion
    // (z, vz) rotate with frequency OMEGAZ; work in scaled coordinates
    // so both components have the same units
    const double zx = p->z * ri_sei.OMEGAZ;
    const double zy = p->vz;
    // Rotation implemented as 3 shear operators
    // to avoid round-off errors
    const double zt1 = zx - ri_sei.tandtz*zy;
    const double zyt = ri_sei.sindtz*zt1 + zy;
    const double zxt = zt1 - ri_sei.tandtz*zyt;
    p->z = zxt/ri_sei.OMEGAZ;
    p->vz = zyt;
    // Integrate motion in xy directions
    const double aO = 2.*p->vy + 4.*p->x*ri_sei.OMEGA; // Center of epicyclic motion
    const double bO = p->y*ri_sei.OMEGA - 2.*p->vx;
    const double ys = (p->y*ri_sei.OMEGA-bO)/2.; // Epicycle vector
    const double xs = (p->x*ri_sei.OMEGA-aO);
    // Rotation implemented as 3 shear operators
    // to avoid round-off errors
    const double xst1 = xs - ri_sei.tandt*ys;
    const double yst = ri_sei.sindt*xst1 + ys;
    const double xst = xst1 - ri_sei.tandt*yst;
    // map the rotated epicycle vector back to positions and velocities;
    // the aO*dt term accounts for the drift of the guiding center
    p->x = (xst+aO) /ri_sei.OMEGA;
    p->y = (yst*2.+bO) /ri_sei.OMEGA - 3./4.*aO*dt;
    p->vx = yst;
    p->vy = -xst*2. -3./2.*aO;
}
/**
 * @brief This function applies the acceleration due to the PHI1 term.
 * @details It is only exact if the forces are velocity independent (i.e. gravity).
 * If the forces are velocity dependent, it breaks the symmetry of the scheme,
 * making it first-order and non-symplectic. As long as these forces are small,
 * this should not be visible. However, it is worth keeping in mind.
 * @param p reb_particle to evolve.
 * @param dt Timestep
 */
static void operator_phi1(double dt, struct reb_particle* p){
    // The force used here is for test cases 2 and 3
    // in Rein & Tremaine 2011.
    // Simple kick: v += a * dt using the accelerations already stored
    // on the particle.
    p->vx += p->ax * dt;
    p->vy += p->ay * dt;
    p->vz += p->az * dt;
}
|
labels.h | /*
An Experimental Study on Hub Labeling based Shortest Path Algorithms [Experiments and Analyses]
Authors: Ye Li, Leong Hou U, Man Lung Yiu, Ngai Meng Kou
Contact: yb47438@umac.mo
Affiliation: University of Macau
The MIT License (MIT)
Copyright (c) 2016 University of Macau
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#pragma once
#ifndef LABELS_H
#define LABELS_H
#include <limits>
#include <climits>
#include <stdlib.h>
#include <iostream>
#include <sys/time.h>
#include "graph.h"
#include "paras.h"
#include <malloc.h>
#include <xmmintrin.h>
//typedef unsigned __int64 BPSeed;
#include <omp.h>
#include<bitset>
#define numOfVertices SP_Constants::numOfVertices
#define numOfEdges SP_Constants::numOfEdges
#define INF_WEIGHT SP_Constants::INF_WEIGHT
// 2-hop label of a single vertex: parallel arrays of hub vertices and
// the distances to them.
struct index_t {
    vector<NodeID> spt_v;     // hub vertices, sorted by construction order
    vector<EdgeWeight> spt_d; // distance to the corresponding hub
    // Number of label entries. Const-qualified so it can be called on
    // const labels; the cast makes the size_t -> NodeID narrowing explicit.
    NodeID size() const {
        return static_cast<NodeID>(spt_v.size());
    }
};
// Pointer-based (flat-array) variant of index_t; arrays are allocated
// elsewhere. Aligned for cache lines.
struct index_t_p {
    NodeID* spt_v;     // hub vertices
    EdgeWeight* spt_d; // distance to the corresponding hub
}__attribute__((aligned(64))); // Aligned for cache lines;
// Two-level pointer-based label: a primary hub/distance array plus a
// second level (spt_lv/spt_ld). Exact level semantics defined by the
// construction code elsewhere in the project.
struct two_index_t_p {
    NodeID* spt_v;      // hub vertices
    EdgeWeight* spt_d;  // distance to the corresponding hub
    uint8_t* spt_lv;    // second-level entries (compact vertex encoding)
    EdgeWeight* spt_ld; // second-level distances
}__attribute__((aligned(64))); // Aligned for cache lines;
// Label with path information: stores, per hub, the parent node used to
// reconstruct shortest paths.
struct index_t_path {
    vector<NodeID> spt_v;     // hub vertices
    vector<NodeID> spt_p;     // parent nodes
    vector<EdgeWeight> spt_d; // distance to the corresponding hub
    // Number of label entries. Const-qualified so it can be called on
    // const labels; the cast makes the size_t -> NodeID narrowing explicit.
    NodeID size() const {
        return static_cast<NodeID>(spt_v.size());
    }
};
// Pointer-based (flat-array) variant of index_t_path.
struct index_t_path_p {
    NodeID* spt_v;     // hub vertices
    NodeID* spt_p;     // parent nodes for path reconstruction
    EdgeWeight* spt_d; // distance to the corresponding hub
};
// Per-query statistics returned by distance queries.
struct query_info {
    NodeID meet_node;    // hub at which the forward/backward labels meet
    NodeID search_len;   // number of label entries scanned
    double time_cost;    // query time
    EdgeWeight distance; // resulting shortest-path distance
};
// Label with bit-parallel shortest-path data for kNumBitParallelRoots
// roots (distances plus two 64-bit sets per root), in addition to the
// regular hub/distance arrays.
template<int kNumBitParallelRoots = 50>
struct index_t_bp {
    NodeID* spt_v;                               // hub vertices
    EdgeWeight* spt_d;                           // distance to the corresponding hub
    EdgeWeight bpspt_d[kNumBitParallelRoots];    // distance to each bit-parallel root
    uint64_t bpspt_s[kNumBitParallelRoots][2];   // two bit sets per root
}__attribute__((aligned(64))); // Aligned for cache lines;
// Compressed (token-based) label node. A token stores a root vertex, its
// children with distances, and bit vectors used for fast membership tests.
struct token_t {
    NodeID* sptc_v; // sptc_v[0] is the root
    EdgeWeight* sptc_d; // |*| = k + 1, sptc_d[0] is the number of children - k
    unsigned char* sptc_fbv; // first-level bit vector
    unsigned char* sptc_sbv; // second-level bit vector
    NodeID* sptc_pathv; // intermediate point for a path
}__attribute__((aligned(64)));
class CLabel {
public:
token_t* supertokenindex_p;
token_t* tokenindex_p;
NodeID* anchor_p;
NodeID numOfTokens;
long total_children;
token_t* r_supertokenindex_p;
token_t* r_tokenindex_p;
NodeID* r_anchor_p;
NodeID r_numOfTokens;
long r_total_children;
void save_labels(const char* save_filename) {
    // Serializes the token-based labels to a binary file. Layout:
    // [numOfVertices][anchor token id per vertex][numOfTokens]
    // then per token: [root][child count][(child id, child distance)...].
    ofstream ofs(save_filename, ios::binary | ios::out);
    ofs.write((const char*)&numOfVertices, sizeof(numOfVertices));
    for (NodeID v = 0; v < numOfVertices; ++v) {
        ofs.write((const char*)&anchor_p[v], sizeof(anchor_p[v]));
        // ofs.write((const char*)&index_[v].spt_d[i], sizeof(index_[v].spt_d[i]));
    }
    ofs.write((const char*)&numOfTokens, sizeof(numOfTokens));
    for (NodeID t = 0; t < numOfTokens; ++t) {
        token_t& tt = tokenindex_p[t];
        // sptc_d[0] holds the number of children of this token
        EdgeWeight tsize = tt.sptc_d[0];
        ofs.write((const char*)&tt.sptc_v[0], sizeof(tt.sptc_v[0]));
        ofs.write((const char*)&tsize, sizeof(tsize));
        for(NodeID c = 0; c < tsize; ++c){
            // children are stored starting at offset 1 (0 is the root)
            ofs.write((const char*)&tt.sptc_v[1 + c], sizeof(tt.sptc_v[1 + c]));
            ofs.write((const char*)&tt.sptc_d[1 + c], sizeof(tt.sptc_d[1 + c]));
        }
    }
    ofs.close();
}
    // Like save_labels(), but each child additionally carries its
    // intermediate path vertex (sptc_pathv). Layout:
    //   [numOfVertices][anchor per vertex][numOfTokens]
    //   per token: [root id][child count]([child id][child dist][child path])*
    void save_labels_path(const char* save_filename) {
        ofstream ofs(save_filename, ios::binary | ios::out);
        ofs.write((const char*)&numOfVertices, sizeof(numOfVertices));
        for (NodeID v = 0; v < numOfVertices; ++v) {
            ofs.write((const char*)&anchor_p[v], sizeof(anchor_p[v]));
            // ofs.write((const char*)&index_[v].spt_d[i], sizeof(index_[v].spt_d[i]));
        }
        ofs.write((const char*)&numOfTokens, sizeof(numOfTokens));
        for (NodeID t = 0; t < numOfTokens; ++t) {
            token_t& tt = tokenindex_p[t];
            EdgeWeight tsize = tt.sptc_d[0];
            ofs.write((const char*)&tt.sptc_v[0], sizeof(tt.sptc_v[0]));
            ofs.write((const char*)&tsize, sizeof(tsize));
            for(NodeID c = 0; c < tsize; ++c){
                ofs.write((const char*)&tt.sptc_v[1 + c], sizeof(tt.sptc_v[1 + c]));
                ofs.write((const char*)&tt.sptc_d[1 + c], sizeof(tt.sptc_d[1 + c]));
                // Third per-child field: the loader must consume this too.
                ofs.write((const char*)&tt.sptc_pathv[1 + c], sizeof(tt.sptc_pathv[1 + c]));
            }
        }
        ofs.close();
    }
    // Directed-graph variant of save_labels(): writes the forward anchors
    // and tokens followed by the reverse (r_*) anchors and tokens.
    // Layout: [numOfVertices][anchor/vertex][r_anchor/vertex]
    //         [numOfTokens]{forward tokens}[r_numOfTokens]{reverse tokens}
    void save_labels_d(const char* save_filename) {
        ofstream ofs(save_filename, ios::binary | ios::out);
        ofs.write((const char*)&numOfVertices, sizeof(numOfVertices));
        for (NodeID v = 0; v < numOfVertices; ++v) {
            ofs.write((const char*)&anchor_p[v], sizeof(anchor_p[v]));
            // ofs.write((const char*)&index_[v].spt_d[i], sizeof(index_[v].spt_d[i]));
        }
        for (NodeID v = 0; v < numOfVertices; ++v) {
            ofs.write((const char*)&r_anchor_p[v], sizeof(r_anchor_p[v]));
            // ofs.write((const char*)&index_[v].spt_d[i], sizeof(index_[v].spt_d[i]));
        }
        ofs.write((const char*)&numOfTokens, sizeof(numOfTokens));
        for (NodeID t = 0; t < numOfTokens; ++t) {
            token_t& tt = tokenindex_p[t];
            EdgeWeight tsize = tt.sptc_d[0];
            ofs.write((const char*)&tt.sptc_v[0], sizeof(tt.sptc_v[0]));
            ofs.write((const char*)&tsize, sizeof(tsize));
            for(NodeID c = 0; c < tsize; ++c){
                ofs.write((const char*)&tt.sptc_v[1 + c], sizeof(tt.sptc_v[1 + c]));
                ofs.write((const char*)&tt.sptc_d[1 + c], sizeof(tt.sptc_d[1 + c]));
            }
        }
        ofs.write((const char*)&r_numOfTokens, sizeof(r_numOfTokens));
        for (NodeID t = 0; t < r_numOfTokens; ++t) {
            token_t& tt = r_tokenindex_p[t];
            EdgeWeight tsize = tt.sptc_d[0];
            ofs.write((const char*)&tt.sptc_v[0], sizeof(tt.sptc_v[0]));
            ofs.write((const char*)&tsize, sizeof(tsize));
            for(NodeID c = 0; c < tsize; ++c){
                ofs.write((const char*)&tt.sptc_v[1 + c], sizeof(tt.sptc_v[1 + c]));
                ofs.write((const char*)&tt.sptc_d[1 + c], sizeof(tt.sptc_d[1 + c]));
            }
        }
        ofs.close();
    }
void load_labels_path(const char* load_filename) {
total_children = 0;
tokenindex_p = NULL;
anchor_p = NULL;
ifstream ifs(load_filename);
NodeID isize = 0;
ifs.read((char*)&isize, sizeof(isize));
numOfVertices = isize;
anchor_p = (NodeID*)memalign(64, numOfVertices * sizeof(NodeID));
NodeID anchor_id;
for (NodeID v = 0; v < numOfVertices; ++v) {
ifs.read((char*)&anchor_id, sizeof(anchor_id));
anchor_p[v] = anchor_id;
}
ifs.read((char*)&isize, sizeof(isize));
numOfTokens = isize;
tokenindex_p = (token_t*)memalign(64, numOfTokens * sizeof(token_t));
EdgeWeight csize;
NodeID cid;
EdgeWeight cd;
for (NodeID v = 0; v < numOfTokens; ++v) {
token_t& tt = tokenindex_p[v];
ifs.read((char*)&cid, sizeof(cid));
ifs.read((char*)&csize, sizeof(csize));
tt.sptc_v = (NodeID*)memalign(64, (csize + 1) * sizeof(NodeID));
tt.sptc_d = (EdgeWeight*)memalign(64, (csize + 1 ) * sizeof(EdgeWeight));
total_children += (csize + 1);
tt.sptc_v[0] = cid;
tt.sptc_d[0] = csize;
for (NodeID i = 0; i < csize; ++i) {
ifs.read((char*)&cid, sizeof(cid));
ifs.read((char*)&cd, sizeof(cd));
tt.sptc_v[i + 1] = cid;
tt.sptc_d[i + 1] = cd;
}
}
ifs.close();
}
void load_labels(const char* load_filename) {
total_children = 0;
tokenindex_p = NULL;
anchor_p = NULL;
ifstream ifs(load_filename);
NodeID isize = 0;
ifs.read((char*)&isize, sizeof(isize));
numOfVertices = isize;
anchor_p = (NodeID*)memalign(64, numOfVertices * sizeof(NodeID));
NodeID anchor_id;
for (NodeID v = 0; v < numOfVertices; ++v) {
ifs.read((char*)&anchor_id, sizeof(anchor_id));
anchor_p[v] = anchor_id;
}
ifs.read((char*)&isize, sizeof(isize));
numOfTokens = isize;
tokenindex_p = (token_t*)memalign(64, numOfTokens * sizeof(token_t));
EdgeWeight csize;
NodeID cid;
EdgeWeight cd;
for (NodeID v = 0; v < numOfTokens; ++v) {
token_t& tt = tokenindex_p[v];
ifs.read((char*)&cid, sizeof(cid));
ifs.read((char*)&csize, sizeof(csize));
tt.sptc_v = (NodeID*)memalign(64, (csize + 1) * sizeof(NodeID));
tt.sptc_d = (EdgeWeight*)memalign(64, (csize + 1 ) * sizeof(EdgeWeight));
total_children += (csize + 1);
tt.sptc_v[0] = cid;
tt.sptc_d[0] = csize;
for (NodeID i = 0; i < csize; ++i) {
ifs.read((char*)&cid, sizeof(cid));
ifs.read((char*)&cd, sizeof(cd));
tt.sptc_v[i + 1] = cid;
tt.sptc_d[i + 1] = cd;
}
}
ifs.close();
}
void load_labels_d(const char* load_filename) {
total_children = 0;
r_total_children = 0;
tokenindex_p = NULL;
anchor_p = NULL;
r_tokenindex_p = NULL;
r_anchor_p = NULL;
ifstream ifs(load_filename);
NodeID isize = 0;
ifs.read((char*)&isize, sizeof(isize));
numOfVertices = isize;
anchor_p = (NodeID*)memalign(64, numOfVertices * sizeof(NodeID));
r_anchor_p = (NodeID*)memalign(64, numOfVertices * sizeof(NodeID));
NodeID anchor_id;
for (NodeID v = 0; v < numOfVertices; ++v) {
ifs.read((char*)&anchor_id, sizeof(anchor_id));
anchor_p[v] = anchor_id;
}
for (NodeID v = 0; v < numOfVertices; ++v) {
ifs.read((char*)&anchor_id, sizeof(anchor_id));
r_anchor_p[v] = anchor_id;
}
ifs.read((char*)&isize, sizeof(isize));
numOfTokens = isize;
tokenindex_p = (token_t*)memalign(64, numOfTokens * sizeof(token_t));
EdgeWeight csize;
NodeID cid;
EdgeWeight cd;
for (NodeID v = 0; v < numOfTokens; ++v) {
token_t& tt = tokenindex_p[v];
ifs.read((char*)&cid, sizeof(cid));
ifs.read((char*)&csize, sizeof(csize));
tt.sptc_v = (NodeID*)memalign(64, (csize + 1) * sizeof(NodeID));
tt.sptc_d = (EdgeWeight*)memalign(64, (csize + 1 ) * sizeof(EdgeWeight));
total_children += (csize + 1);
tt.sptc_v[0] = cid;
tt.sptc_d[0] = csize;
for (NodeID i = 0; i < csize; ++i) {
ifs.read((char*)&cid, sizeof(cid));
ifs.read((char*)&cd, sizeof(cd));
tt.sptc_v[i + 1] = cid;
tt.sptc_d[i + 1] = cd;
}
}
ifs.read((char*)&isize, sizeof(isize));
r_numOfTokens = isize;
r_tokenindex_p = (token_t*)memalign(64, r_numOfTokens * sizeof(token_t));
for (NodeID v = 0; v < r_numOfTokens; ++v) {
token_t& tt = r_tokenindex_p[v];
ifs.read((char*)&cid, sizeof(cid));
ifs.read((char*)&csize, sizeof(csize));
tt.sptc_v = (NodeID*)memalign(64, (csize + 1) * sizeof(NodeID));
tt.sptc_d = (EdgeWeight*)memalign(64, (csize + 1 ) * sizeof(EdgeWeight));
r_total_children += (csize + 1);
tt.sptc_v[0] = cid;
tt.sptc_d[0] = csize;
for (NodeID i = 0; i < csize; ++i) {
ifs.read((char*)&cid, sizeof(cid));
ifs.read((char*)&cd, sizeof(cd));
tt.sptc_v[i + 1] = cid;
tt.sptc_d[i + 1] = cd;
}
}
cout << "finish loading" << endl;
ifs.close();
}
void print_stat() {
cout << "Total Token #: " << numOfTokens << endl;
cout << "Average Children (Super) Token #: " << (double)total_children/(double)numOfTokens << endl;
//cout << "Maximum Label Size: " << max_size() << endl;
}
void print_stat_d() {
cout << "Total Token #: " << numOfTokens << endl;
cout << "Total r_Token #: " << r_numOfTokens << endl;
cout << "Average Children (Super) Token #: " << (double)total_children/(double)numOfTokens << endl;
cout << "Average Children (Super) Token #: " << (double)r_total_children/(double)r_numOfTokens << endl;
// cout << "Maximum Label Size: " << max_size() << endl;
}
    // Point-to-point distance query over the (flat) token index.
    // ts is a per-query timestamp: dis_vec[v] is valid only when
    // ts_vec[v] == ts, so the scratch vectors never need resetting.
    // que/que_d are caller-provided BFS queues over token ids.
    // Anchor ids >= numOfVertices denote tokens (index = id - numOfVertices);
    // smaller ids are plain label vertices. Phase 1 expands s's token tree,
    // stamping each reached label vertex with its distance from s; phase 2
    // expands t's tree and minimizes dist(s,hub) + dist(hub,t).
    // Returns INF_WEIGHT when s and t share no hub.
    // NOTE(review): dis_vec is vector<NodeID> but stores EdgeWeight values —
    // relies on the two typedefs being layout-compatible; confirm.
    EdgeWeight query_p(NodeID s, NodeID t, long ts, vector<NodeID>& dis_vec, vector<long>& ts_vec, vector<NodeID>& que, vector<EdgeWeight>& que_d) {
        if(s==t) return 0;
        EdgeWeight distance = INF_WEIGHT;
        NodeID anchor_s = anchor_p[s];
        NodeID anchor_t = anchor_p[t];
        // Phase 1: expand s's anchor.
        NodeID que_t0 = 0, que_t1 = 0, que_h = 0;
        que_d[que_h] = 0;
        que[que_h++] = anchor_s;
        que_t1 = que_h;
        if(anchor_s < numOfVertices){
            // Anchor is a plain vertex: stamp it at distance 0.
            if(ts_vec[anchor_s] != ts){
                ts_vec[anchor_s] = ts;
                dis_vec[anchor_s] = 0;
            }
        }
        else{
            // Anchor is a token: level-by-level BFS over the token tree.
            for (; que_t0 < que_h;) {
                for (NodeID que_i = que_t0; que_i < que_t1; ++que_i) {
                    NodeID tid = que[que_i];
                    EdgeWeight tdis = que_d[que_i];
                    const token_t& token_v = tokenindex_p[tid - numOfVertices];
                    _mm_prefetch(&token_v.sptc_v[0], _MM_HINT_T0);
                    _mm_prefetch(&token_v.sptc_d[0], _MM_HINT_T0);
                    NodeID r = token_v.sptc_v[0];
                    EdgeWeight csize = token_v.sptc_d[0];
                    // hashing, can be replaced by 1024 linear probing for efficiency.
                    // Stamp the token's root with the accumulated distance.
                    if(ts_vec[r] != ts){
                        ts_vec[r] = ts;
                        dis_vec[r] = tdis;
                    }
                    for (EdgeWeight i = 0; i < csize; ++i){
                        NodeID w = token_v.sptc_v[i+1];
                        EdgeWeight w_d = token_v.sptc_d[i+1] + tdis;
                        if( w < numOfVertices){// hashing, can be replaced by 1024 linear probing for efficiency.
                            // Child is a label vertex: record distance from s.
                            if(ts_vec[w] != ts){
                                ts_vec[w] = ts;
                                dis_vec[w] = w_d;
                            }
                        }else{
                            // Child is a nested token: enqueue for expansion.
                            que_d[que_h] = w_d;
                            que[que_h++] = w;
                        }
                    }
                }
                que_t0 = que_t1;
                que_t1 = que_h;
            }
        }
        // Phase 2: expand t's anchor, combining with the phase-1 stamps.
        que_t0 = 0, que_t1 = 0, que_h = 0;
        que_d[que_h] = 0;
        que[que_h++] = anchor_t;
        if(anchor_t < numOfVertices){
            if(ts_vec[anchor_t] == ts){
                EdgeWeight current_dis = dis_vec[anchor_t] + 0;
                if(current_dis < distance)
                    distance = current_dis;
            }
        }else{
            que_t1 = que_h;
            for (; que_t0 < que_h;) {
                for (NodeID que_i = que_t0; que_i < que_t1; ++que_i) {
                    NodeID tid = que[que_i];
                    EdgeWeight tdis = que_d[que_i];
                    const token_t& token_v = tokenindex_p[tid - numOfVertices];
                    _mm_prefetch(&token_v.sptc_v[0], _MM_HINT_T0);
                    _mm_prefetch(&token_v.sptc_d[0], _MM_HINT_T0);
                    NodeID r = token_v.sptc_v[0];
                    EdgeWeight csize = token_v.sptc_d[0];
                    // hashing, can be replaced by 1024 linear probing for efficiency.
                    // Root stamped in phase 1 => it is a common hub.
                    if(ts_vec[r] == ts){
                        EdgeWeight current_dis = dis_vec[r] + tdis;
                        if(current_dis < distance)
                            distance = current_dis;
                    }
                    for (EdgeWeight i = 0; i < csize; ++i){
                        NodeID w = token_v.sptc_v[i+1];
                        EdgeWeight w_d = token_v.sptc_d[i+1] + tdis;
                        if( w < numOfVertices){
                            // hashing, can be replaced by 1024 linear probing for efficiency.
                            if(ts_vec[w] == ts){
                                EdgeWeight current_dis = dis_vec[w] + w_d;
                                if(current_dis < distance)
                                    distance = current_dis;
                            }
                        }else{
                            que_d[que_h] = w_d;
                            que[que_h++] = w;
                        }
                    }
                }
                que_t0 = que_t1;
                que_t1 = que_h;
            }
        }
        return distance;
    }
    // Directed-graph variant of query_p(): the s side uses the forward
    // structures (anchor_p / tokenindex_p) and the t side the reverse ones
    // (r_anchor_p / r_tokenindex_p). Same timestamped two-phase scheme:
    // phase 1 stamps forward distances from s, phase 2 expands t's reverse
    // tree and minimizes over common hubs. Returns INF_WEIGHT if none.
    EdgeWeight query_p_d(NodeID s, NodeID t, long ts, vector<NodeID>& dis_vec, vector<long>& ts_vec, vector<NodeID>& que, vector<EdgeWeight>& que_d) {
        if(s==t) return 0;
        EdgeWeight distance = INF_WEIGHT;
        NodeID anchor_s = anchor_p[s];
        NodeID anchor_t = r_anchor_p[t]; // reverse-direction anchor for t
        // Phase 1: forward expansion from s's anchor.
        NodeID que_t0 = 0, que_t1 = 0, que_h = 0;
        que_d[que_h] = 0;
        que[que_h++] = anchor_s;
        que_t1 = que_h;
        if(anchor_s < numOfVertices){
            if(ts_vec[anchor_s] != ts){
                ts_vec[anchor_s] = ts;
                dis_vec[anchor_s] = 0;
            }
        }
        else{
            for (; que_t0 < que_h;) {
                for (NodeID que_i = que_t0; que_i < que_t1; ++que_i) {
                    NodeID tid = que[que_i];
                    EdgeWeight tdis = que_d[que_i];
                    const token_t& token_v = tokenindex_p[tid - numOfVertices];
                    _mm_prefetch(&token_v.sptc_v[0], _MM_HINT_T0);
                    _mm_prefetch(&token_v.sptc_d[0], _MM_HINT_T0);
                    NodeID r = token_v.sptc_v[0];
                    EdgeWeight csize = token_v.sptc_d[0];
                    // hashing, can be replaced by 1024 linear probing for efficiency.
                    if(ts_vec[r] != ts){
                        ts_vec[r] = ts;
                        dis_vec[r] = tdis;
                    }
                    for (EdgeWeight i = 0; i < csize; ++i){
                        NodeID w = token_v.sptc_v[i+1];
                        EdgeWeight w_d = token_v.sptc_d[i+1] + tdis;
                        if( w < numOfVertices){// hashing, can be replaced by 1024 linear probing for efficiency.
                            if(ts_vec[w] != ts){
                                ts_vec[w] = ts;
                                dis_vec[w] = w_d;
                            }
                        }else{
                            que_d[que_h] = w_d;
                            que[que_h++] = w;
                        }
                    }
                }
                que_t0 = que_t1;
                que_t1 = que_h;
            }
        }
        // Phase 2: reverse expansion from t's anchor over r_tokenindex_p.
        que_t0 = 0, que_t1 = 0, que_h = 0;
        que_d[que_h] = 0;
        que[que_h++] = anchor_t;
        if(anchor_t < numOfVertices){
            if(ts_vec[anchor_t] == ts){
                EdgeWeight current_dis = dis_vec[anchor_t] + 0;
                if(current_dis < distance)
                    distance = current_dis;
            }
        }else{
            que_t1 = que_h;
            for (; que_t0 < que_h;) {
                for (NodeID que_i = que_t0; que_i < que_t1; ++que_i) {
                    NodeID tid = que[que_i];
                    EdgeWeight tdis = que_d[que_i];
                    const token_t& token_v = r_tokenindex_p[tid - numOfVertices];
                    _mm_prefetch(&token_v.sptc_v[0], _MM_HINT_T0);
                    _mm_prefetch(&token_v.sptc_d[0], _MM_HINT_T0);
                    NodeID r = token_v.sptc_v[0];
                    EdgeWeight csize = token_v.sptc_d[0];
                    // hashing, can be replaced by 1024 linear probing for efficiency.
                    if(ts_vec[r] == ts){
                        EdgeWeight current_dis = dis_vec[r] + tdis;
                        if(current_dis < distance)
                            distance = current_dis;
                    }
                    for (EdgeWeight i = 0; i < csize; ++i){
                        NodeID w = token_v.sptc_v[i+1];
                        EdgeWeight w_d = token_v.sptc_d[i+1] + tdis;
                        if( w < numOfVertices){
                            // hashing, can be replaced by 1024 linear probing for efficiency.
                            if(ts_vec[w] == ts){
                                EdgeWeight current_dis = dis_vec[w] + w_d;
                                if(current_dis < distance)
                                    distance = current_dis;
                            }
                        }else{
                            que_d[que_h] = w_d;
                            que[que_h++] = w;
                        }
                    }
                }
                que_t0 = que_t1;
                que_t1 = que_h;
            }
        }
        return distance;
    }
    // Serialize the two-level token index:
    //   [numOfVertices][anchor per vertex]
    //   per vertex: supertoken = [child count]([child id][child dist])*
    //   [numOfTokens]
    //   per token: [supertoken/root id][second-level byte count],
    //              then (if non-empty) first-level bit vector bytes
    //              followed by second-level bit vector bytes.
    void save_two_level_labels(const char* save_filename) {
        ofstream ofs(save_filename, ios::binary | ios::out);
        ofs.write((const char*)&numOfVertices, sizeof(numOfVertices));
        for (NodeID v = 0; v < numOfVertices; ++v) {
            ofs.write((const char*)&anchor_p[v], sizeof(anchor_p[v]));
            // ofs.write((const char*)&index_[v].spt_d[i], sizeof(index_[v].spt_d[i]));
        }
        // Store supertokens
        for (NodeID v = 0; v < numOfVertices; ++v) {
            token_t& supertoken_v = supertokenindex_p[v];
            // For supertokens, sptc_v[0] is the child count (not a root id).
            NodeID isize = supertoken_v.sptc_v[0];
            ofs.write((const char*)&isize, sizeof(isize));
            for(NodeID i = 0; i < isize; ++i){
                NodeID tid = supertoken_v.sptc_v[i + 1];
                EdgeWeight ew = supertoken_v.sptc_d[i + 1];
                ofs.write((const char*)&tid, sizeof(tid));
                ofs.write((const char*)&ew, sizeof(ew));
            }
            // ofs.write((const char*)&index_[v].spt_d[i], sizeof(index_[v].spt_d[i]));
        }
        // Store normal tokens
        ofs.write((const char*)&numOfTokens, sizeof(numOfTokens));
        for (NodeID t = 0; t < numOfTokens; ++t) {
            token_t& tt = tokenindex_p[t];
            NodeID sid = tt.sptc_v[0];
            EdgeWeight ssize = tt.sptc_d[0];
            // fsize (first-level byte count) is recomputed by the loader
            // from the supertoken, so it is not written to the file.
            EdgeWeight fsize = supertokenindex_p[sid].sptc_d[0];
            ofs.write((const char*)&sid, sizeof(sid));
            ofs.write((const char*)&ssize, sizeof(ssize));
            if(ssize == 0) continue; // empty token: no bit vectors stored
            //ofs.write((const char*)&fsize, sizeof(fsize));
            //if(t < 10)
            //	cout << sid << "vs" << fsize << "vs" << ssize << endl;
            for(NodeID c = 0; c < fsize; ++c){
                //char a = tt.sptc_fbv[c];
                //ofs.write((const char*)&a, sizeof(a));
                ofs.write((const char*)&tt.sptc_fbv[c], sizeof(tt.sptc_fbv[c]));
                // if(t < 10){
                //	 bitset<8> s(tt.sptc_fbv[c]);
                //	 cout << s;
                // }
            }
            //if(t < 10)
            //	cout << endl;
            for(NodeID c = 0; c < ssize; ++c){
                //char a = tt.sptc_sbv[c];
                //ofs.write((const char*)&a, sizeof(a));
                ofs.write((const char*)&tt.sptc_sbv[c], sizeof(tt.sptc_sbv[c]));
                // if(t < 10){
                //	 bitset<8> s(tt.sptc_sbv[c]);
                //	 cout << s;
                // }
            }
            //if(t < 10)
            //	cout << endl;
        }
        ofs.close();
    }
void load_two_level_labels(const char* load_filename) {
total_children = 0;
tokenindex_p = NULL;
anchor_p = NULL;
supertokenindex_p = NULL;
ifstream ifs(load_filename);
NodeID isize = 0;
ifs.read((char*)&isize, sizeof(isize));
numOfVertices = isize;
anchor_p = (NodeID*)memalign(64, numOfVertices * sizeof(NodeID));
NodeID anchor_id;
for (NodeID v = 0; v < numOfVertices; ++v) {
ifs.read((char*)&anchor_id, sizeof(anchor_id));
anchor_p[v] = anchor_id;
}
//load supertokens
NodeID cid;
EdgeWeight cd;
supertokenindex_p = (token_t*)memalign(64, numOfVertices * sizeof(token_t));
for (NodeID v = 0; v < numOfVertices; ++v) {
token_t& supertoken_v = supertokenindex_p[v];
NodeID csize;
ifs.read((char*)&csize, sizeof(csize));
supertoken_v.sptc_v = (NodeID*)memalign(64, (csize + 1) * sizeof(NodeID));
supertoken_v.sptc_d = (EdgeWeight*)memalign(64, (csize + 1 ) * sizeof(EdgeWeight));
supertoken_v.sptc_v[0] = csize;
NodeID intsize = ceil((double)ceil((double)csize / (double)8) / (double)8);
supertoken_v.sptc_d[0] = intsize;
total_children += csize;
for(EdgeWeight i = 0; i < csize; ++i){
ifs.read((char*)&cid, sizeof(cid));
ifs.read((char*)&cd, sizeof(cd));
supertoken_v.sptc_v[i + 1] = cid;
supertoken_v.sptc_d[i + 1] = cd;
}
}
cout << "loaded supertokens" << endl;
cout << "Average Children Super Token #: " << (double)total_children/(double)numOfVertices << endl;
ifs.read((char*)&isize, sizeof(isize));
numOfTokens = isize;
NodeID sid;
EdgeWeight ssize;
EdgeWeight fsize;
cout<< numOfTokens << " tokens in total." << endl;
tokenindex_p = (token_t*)memalign(64, numOfTokens * sizeof(token_t));
for (NodeID v = 0; v < numOfTokens; ++v) {
token_t& tt = tokenindex_p[v];
ifs.read((char*)&sid, sizeof(sid));
ifs.read((char*)&ssize, sizeof(ssize));
tt.sptc_v = (NodeID*)memalign(64, 1 * sizeof(NodeID));
tt.sptc_d = (EdgeWeight*)memalign(64, 1 * sizeof(EdgeWeight));
tt.sptc_v[0] = sid;
tt.sptc_d[0] = ssize;
fsize = supertokenindex_p[sid].sptc_d[0];
if(ssize == 0) continue;
//if(v < 10)
// cout << sid << "vs" << fsize << "vs" << ssize << endl;
tt.sptc_fbv = (unsigned char*)memalign(64, fsize * sizeof(unsigned char));
// unsigned char fb;
char fb;
for (NodeID i = 0; i < fsize; ++i) {
ifs.read((char*)&(tt.sptc_fbv[i]), sizeof(tt.sptc_fbv[i]));
//ifs.read((char*)&fb, sizeof(fb));
// if(v < 10){
// bitset<8> s(tt.sptc_fbv[i]);
// cout << s;
// }
}
//if(v < 10)
// cout << endl;
tt.sptc_sbv = (unsigned char*)memalign(64, ssize * sizeof(unsigned char));
//unsigned char sb;
char sb;
for (NodeID i = 0; i < ssize; ++i) {
ifs.read((char*)&(tt.sptc_sbv[i]), sizeof(tt.sptc_sbv[i]));
//ifs.read((char*)&sb, sizeof(sb));
// if(v < 10){
// bitset<8> s(tt.sptc_sbv[i]);
// cout << s;
//}
}
//if(v < 10)
// cout << endl;
//
}
cout << "loaded standard tokens" << endl;
ifs.close();
}
    // Like save_two_level_labels(), but each supertoken child additionally
    // carries its intermediate path vertex (sptc_pathv):
    //   per vertex: supertoken = [child count]
    //               ([child id][child dist][child path vertex])*.
    // Token bit-vector encoding is unchanged.
    void save_two_level_labels_path(const char* save_filename) {
        ofstream ofs(save_filename, ios::binary | ios::out);
        ofs.write((const char*)&numOfVertices, sizeof(numOfVertices));
        for (NodeID v = 0; v < numOfVertices; ++v) {
            ofs.write((const char*)&anchor_p[v], sizeof(anchor_p[v]));
            // ofs.write((const char*)&index_[v].spt_d[i], sizeof(index_[v].spt_d[i]));
        }
        // Store supertokens
        for (NodeID v = 0; v < numOfVertices; ++v) {
            token_t& supertoken_v = supertokenindex_p[v];
            NodeID isize = supertoken_v.sptc_v[0];
            ofs.write((const char*)&isize, sizeof(isize));
            for(NodeID i = 0; i < isize; ++i){
                NodeID tid = supertoken_v.sptc_v[i + 1];
                EdgeWeight ew = supertoken_v.sptc_d[i + 1];
                NodeID pid = supertoken_v.sptc_pathv[i + 1];
                ofs.write((const char*)&tid, sizeof(tid));
                ofs.write((const char*)&ew, sizeof(ew));
                ofs.write((const char*)&pid, sizeof(pid));
            }
            // ofs.write((const char*)&index_[v].spt_d[i], sizeof(index_[v].spt_d[i]));
        }
        // Store normal tokens
        ofs.write((const char*)&numOfTokens, sizeof(numOfTokens));
        for (NodeID t = 0; t < numOfTokens; ++t) {
            token_t& tt = tokenindex_p[t];
            NodeID sid = tt.sptc_v[0];
            EdgeWeight ssize = tt.sptc_d[0];
            // fsize is recomputed by the loader from the supertoken.
            EdgeWeight fsize = supertokenindex_p[sid].sptc_d[0];
            ofs.write((const char*)&sid, sizeof(sid));
            ofs.write((const char*)&ssize, sizeof(ssize));
            if(ssize == 0) continue; // empty token: no bit vectors stored
            //ofs.write((const char*)&fsize, sizeof(fsize));
            //if(t < 10)
            //	cout << sid << "vs" << fsize << "vs" << ssize << endl;
            for(NodeID c = 0; c < fsize; ++c){
                //char a = tt.sptc_fbv[c];
                //ofs.write((const char*)&a, sizeof(a));
                ofs.write((const char*)&tt.sptc_fbv[c], sizeof(tt.sptc_fbv[c]));
                // if(t < 10){
                //	 bitset<8> s(tt.sptc_fbv[c]);
                //	 cout << s;
                // }
            }
            //if(t < 10)
            //	cout << endl;
            for(NodeID c = 0; c < ssize; ++c){
                //char a = tt.sptc_sbv[c];
                //ofs.write((const char*)&a, sizeof(a));
                ofs.write((const char*)&tt.sptc_sbv[c], sizeof(tt.sptc_sbv[c]));
                // if(t < 10){
                //	 bitset<8> s(tt.sptc_sbv[c]);
                //	 cout << s;
                // }
            }
            //if(t < 10)
            //	cout << endl;
        }
        ofs.close();
    }
void load_two_level_labels_path(const char* load_filename) {
total_children = 0;
tokenindex_p = NULL;
anchor_p = NULL;
supertokenindex_p = NULL;
ifstream ifs(load_filename);
NodeID isize = 0;
ifs.read((char*)&isize, sizeof(isize));
numOfVertices = isize;
anchor_p = (NodeID*)memalign(64, numOfVertices * sizeof(NodeID));
NodeID anchor_id;
for (NodeID v = 0; v < numOfVertices; ++v) {
ifs.read((char*)&anchor_id, sizeof(anchor_id));
anchor_p[v] = anchor_id;
}
//load supertokens
NodeID cid;
EdgeWeight cd;
supertokenindex_p = (token_t*)memalign(64, numOfVertices * sizeof(token_t));
for (NodeID v = 0; v < numOfVertices; ++v) {
token_t& supertoken_v = supertokenindex_p[v];
NodeID csize;
ifs.read((char*)&csize, sizeof(csize));
supertoken_v.sptc_v = (NodeID*)memalign(64, (csize + 1) * sizeof(NodeID));
supertoken_v.sptc_d = (EdgeWeight*)memalign(64, (csize + 1 ) * sizeof(EdgeWeight));
supertoken_v.sptc_pathv = (EdgeWeight*)memalign(64, (csize + 1 ) * sizeof(EdgeWeight));
supertoken_v.sptc_v[0] = csize;
NodeID intsize = ceil((double)ceil((double)csize / (double)8) / (double)8);
supertoken_v.sptc_d[0] = intsize;
supertoken_v.sptc_pathv[0] = numOfVertices;
total_children += csize;
for(EdgeWeight i = 0; i < csize; ++i){
ifs.read((char*)&cid, sizeof(cid));
ifs.read((char*)&cd, sizeof(cd));
supertoken_v.sptc_v[i + 1] = cid;
supertoken_v.sptc_d[i + 1] = cd;
ifs.read((char*)&cid, sizeof(cid));
supertoken_v.sptc_pathv[i + 1] = cid;
}
}
cout << "loaded supertokens" << endl;
cout << "Average Children Super Token #: " << (double)total_children/(double)numOfVertices << endl;
ifs.read((char*)&isize, sizeof(isize));
numOfTokens = isize;
NodeID sid;
EdgeWeight ssize;
EdgeWeight fsize;
cout<< numOfTokens << " tokens in total." << endl;
tokenindex_p = (token_t*)memalign(64, numOfTokens * sizeof(token_t));
for (NodeID v = 0; v < numOfTokens; ++v) {
token_t& tt = tokenindex_p[v];
ifs.read((char*)&sid, sizeof(sid));
ifs.read((char*)&ssize, sizeof(ssize));
tt.sptc_v = (NodeID*)memalign(64, 1 * sizeof(NodeID));
tt.sptc_d = (EdgeWeight*)memalign(64, 1 * sizeof(EdgeWeight));
tt.sptc_v[0] = sid;
tt.sptc_d[0] = ssize;
fsize = supertokenindex_p[sid].sptc_d[0];
if(ssize == 0) continue;
//if(v < 10)
// cout << sid << "vs" << fsize << "vs" << ssize << endl;
tt.sptc_fbv = (unsigned char*)memalign(64, fsize * sizeof(unsigned char));
// unsigned char fb;
char fb;
for (NodeID i = 0; i < fsize; ++i) {
ifs.read((char*)&(tt.sptc_fbv[i]), sizeof(tt.sptc_fbv[i]));
//ifs.read((char*)&fb, sizeof(fb));
// if(v < 10){
// bitset<8> s(tt.sptc_fbv[i]);
// cout << s;
// }
}
//if(v < 10)
// cout << endl;
tt.sptc_sbv = (unsigned char*)memalign(64, ssize * sizeof(unsigned char));
//unsigned char sb;
char sb;
for (NodeID i = 0; i < ssize; ++i) {
ifs.read((char*)&(tt.sptc_sbv[i]), sizeof(tt.sptc_sbv[i]));
//ifs.read((char*)&sb, sizeof(sb));
// if(v < 10){
// bitset<8> s(tt.sptc_sbv[i]);
// cout << s;
//}
}
//if(v < 10)
// cout << endl;
//
}
cout << "loaded standard tokens" << endl;
ifs.close();
}
    // Directed-graph variant of save_two_level_labels(): writes forward and
    // reverse anchors, forward and reverse supertokens, then forward and
    // reverse standard tokens (each token as [root id][second-level byte
    // count] + first-level and second-level bit-vector bytes).
    void save_two_level_labels_d(const char* save_filename) {
        ofstream ofs(save_filename, ios::binary | ios::out);
        //cout << "1" << endl;
        ofs.write((const char*)&numOfVertices, sizeof(numOfVertices));
        for (NodeID v = 0; v < numOfVertices; ++v) {
            ofs.write((const char*)&anchor_p[v], sizeof(anchor_p[v]));
            // ofs.write((const char*)&index_[v].spt_d[i], sizeof(index_[v].spt_d[i]));
        }
        for (NodeID v = 0; v < numOfVertices; ++v) {
            ofs.write((const char*)&r_anchor_p[v], sizeof(r_anchor_p[v]));
            // ofs.write((const char*)&index_[v].spt_d[i], sizeof(index_[v].spt_d[i]));
        }
        // Store supertokens
        // cout << "2" << endl;
        for (NodeID v = 0; v < numOfVertices; ++v) {
            token_t& supertoken_v = supertokenindex_p[v];
            // For supertokens, sptc_v[0] is the child count.
            NodeID isize = supertoken_v.sptc_v[0];
            ofs.write((const char*)&isize, sizeof(isize));
            for(NodeID i = 0; i < isize; ++i){
                NodeID tid = supertoken_v.sptc_v[i + 1];
                EdgeWeight ew = supertoken_v.sptc_d[i + 1];
                ofs.write((const char*)&tid, sizeof(tid));
                ofs.write((const char*)&ew, sizeof(ew));
            }
        }
        // Reverse-direction supertokens.
        for (NodeID v = 0; v < numOfVertices; ++v) {
            token_t& supertoken_v = r_supertokenindex_p[v];
            NodeID isize = supertoken_v.sptc_v[0];
            ofs.write((const char*)&isize, sizeof(isize));
            for(NodeID i = 0; i < isize; ++i){
                NodeID tid = supertoken_v.sptc_v[i + 1];
                EdgeWeight ew = supertoken_v.sptc_d[i + 1];
                ofs.write((const char*)&tid, sizeof(tid));
                ofs.write((const char*)&ew, sizeof(ew));
            }
        }
        // Store normal tokens
        //cout << "3" << endl;
        ofs.write((const char*)&numOfTokens, sizeof(numOfTokens));
        for (NodeID t = 0; t < numOfTokens; ++t) {
            // cout << "31:" << t << endl;
            token_t& tt = tokenindex_p[t];
            NodeID sid = tt.sptc_v[0];
            EdgeWeight ssize = tt.sptc_d[0];
            // fsize is recomputed by the loader from the supertoken.
            EdgeWeight fsize = supertokenindex_p[sid].sptc_d[0];
            ofs.write((const char*)&sid, sizeof(sid));
            ofs.write((const char*)&ssize, sizeof(ssize));
            // cout << "32:" << t << endl;
            if(ssize == 0) continue; // empty token: no bit vectors stored
            //ofs.write((const char*)&fsize, sizeof(fsize));
            //if(t < 10)
            //	cout << sid << "vs" << fsize << "vs" << ssize << endl;
            // cout << "33:" << t << endl;
            for(NodeID c = 0; c < fsize; ++c){
                //char a = tt.sptc_fbv[c];
                //ofs.write((const char*)&a, sizeof(a));
                ofs.write((const char*)&tt.sptc_fbv[c], sizeof(tt.sptc_fbv[c]));
                // if(t < 10){
                //	 bitset<8> s(tt.sptc_fbv[c]);
                //	 cout << s;
                // }
            }
            //if(t < 10)
            //	cout << endl;
            // cout << "34:" << t << endl;
            for(NodeID c = 0; c < ssize; ++c){
                //char a = tt.sptc_sbv[c];
                //ofs.write((const char*)&a, sizeof(a));
                ofs.write((const char*)&tt.sptc_sbv[c], sizeof(tt.sptc_sbv[c]));
                // if(t < 10){
                //	 bitset<8> s(tt.sptc_sbv[c]);
                //	 cout << s;
                // }
            }
            //if(t < 10)
            //	cout << endl;
        }
        //cout << "4" << endl;
        // Reverse-direction standard tokens.
        ofs.write((const char*)&r_numOfTokens, sizeof(r_numOfTokens));
        for (NodeID t = 0; t < r_numOfTokens; ++t) {
            //cout << "41:" << t << endl;
            token_t& tt = r_tokenindex_p[t];
            NodeID sid = tt.sptc_v[0];
            EdgeWeight ssize = tt.sptc_d[0];
            EdgeWeight fsize = r_supertokenindex_p[sid].sptc_d[0];
            ofs.write((const char*)&sid, sizeof(sid));
            ofs.write((const char*)&ssize, sizeof(ssize));
            if(ssize == 0) continue;
            //ofs.write((const char*)&fsize, sizeof(fsize));
            //if(t < 10)
            //	cout << sid << "vs" << fsize << "vs" << ssize << endl;
            //cout << "42:" << t << "," << fsize << endl;
            for(NodeID c = 0; c < fsize; ++c){
                ofs.write((const char*)&tt.sptc_fbv[c], sizeof(tt.sptc_fbv[c]));
            }
            //cout << "43:" << t << "," << ssize << endl;
            for(NodeID c = 0; c < ssize; ++c){
                ofs.write((const char*)&tt.sptc_sbv[c], sizeof(tt.sptc_sbv[c]));
            }
        }
        ofs.close();
    }
void load_two_level_labels_d(const char* load_filename) {
total_children = 0;
tokenindex_p = NULL;
anchor_p = NULL;
supertokenindex_p = NULL;
r_total_children = 0;
r_tokenindex_p = NULL;
r_anchor_p = NULL;
r_supertokenindex_p = NULL;
ifstream ifs(load_filename);
NodeID isize = 0;
ifs.read((char*)&isize, sizeof(isize));
numOfVertices = isize;
anchor_p = (NodeID*)memalign(64, numOfVertices * sizeof(NodeID));
r_anchor_p = (NodeID*)memalign(64, numOfVertices * sizeof(NodeID));
NodeID anchor_id;
for (NodeID v = 0; v < numOfVertices; ++v) {
ifs.read((char*)&anchor_id, sizeof(anchor_id));
anchor_p[v] = anchor_id;
}
for (NodeID v = 0; v < numOfVertices; ++v) {
ifs.read((char*)&anchor_id, sizeof(anchor_id));
r_anchor_p[v] = anchor_id;
}
//load supertokens
NodeID cid;
EdgeWeight cd;
supertokenindex_p = (token_t*)memalign(64, numOfVertices * sizeof(token_t));
for (NodeID v = 0; v < numOfVertices; ++v) {
token_t& supertoken_v = supertokenindex_p[v];
NodeID csize;
ifs.read((char*)&csize, sizeof(csize));
supertoken_v.sptc_v = (NodeID*)memalign(64, (csize + 1) * sizeof(NodeID));
supertoken_v.sptc_d = (EdgeWeight*)memalign(64, (csize + 1 ) * sizeof(EdgeWeight));
supertoken_v.sptc_v[0] = csize;
NodeID intsize = ceil((double)ceil((double)csize / (double)8) / (double)8);
supertoken_v.sptc_d[0] = intsize;
total_children += csize;
for(EdgeWeight i = 0; i < csize; ++i){
ifs.read((char*)&cid, sizeof(cid));
ifs.read((char*)&cd, sizeof(cd));
supertoken_v.sptc_v[i + 1] = cid;
supertoken_v.sptc_d[i + 1] = cd;
}
}
r_supertokenindex_p = (token_t*)memalign(64, numOfVertices * sizeof(token_t));
for (NodeID v = 0; v < numOfVertices; ++v) {
token_t& supertoken_v = r_supertokenindex_p[v];
NodeID csize;
ifs.read((char*)&csize, sizeof(csize));
supertoken_v.sptc_v = (NodeID*)memalign(64, (csize + 1) * sizeof(NodeID));
supertoken_v.sptc_d = (EdgeWeight*)memalign(64, (csize + 1 ) * sizeof(EdgeWeight));
supertoken_v.sptc_v[0] = csize;
NodeID intsize = ceil((double)ceil((double)csize / (double)8) / (double)8);
supertoken_v.sptc_d[0] = intsize;
r_total_children += csize;
for(EdgeWeight i = 0; i < csize; ++i){
ifs.read((char*)&cid, sizeof(cid));
ifs.read((char*)&cd, sizeof(cd));
supertoken_v.sptc_v[i + 1] = cid;
supertoken_v.sptc_d[i + 1] = cd;
}
}
cout << "loaded supertokens" << endl;
cout << "Average Children Super Token #: " << (double)total_children/(double)numOfVertices << endl;
cout << "Average Children Super Token #: " << (double)r_total_children/(double)numOfVertices << endl;
ifs.read((char*)&isize, sizeof(isize));
numOfTokens = isize;
NodeID sid;
EdgeWeight ssize;
EdgeWeight fsize;
cout<< numOfTokens << " tokens in total." << endl;
tokenindex_p = (token_t*)memalign(64, numOfTokens * sizeof(token_t));
for (NodeID v = 0; v < numOfTokens; ++v) {
token_t& tt = tokenindex_p[v];
ifs.read((char*)&sid, sizeof(sid));
ifs.read((char*)&ssize, sizeof(ssize));
tt.sptc_v = (NodeID*)memalign(64, 1 * sizeof(NodeID));
tt.sptc_d = (EdgeWeight*)memalign(64, 1 * sizeof(EdgeWeight));
tt.sptc_v[0] = sid;
tt.sptc_d[0] = ssize;
fsize = supertokenindex_p[sid].sptc_d[0];
if(ssize == 0) continue;
//if(v < 10)
// cout << sid << "vs" << fsize << "vs" << ssize << endl;
tt.sptc_fbv = (unsigned char*)memalign(64, fsize * sizeof(unsigned char));
// unsigned char fb;
char fb;
for (NodeID i = 0; i < fsize; ++i) {
ifs.read((char*)&(tt.sptc_fbv[i]), sizeof(tt.sptc_fbv[i]));
//ifs.read((char*)&fb, sizeof(fb));
// if(v < 10){
// bitset<8> s(tt.sptc_fbv[i]);
// cout << s;
// }
}
//if(v < 10)
// cout << endl;
tt.sptc_sbv = (unsigned char*)memalign(64, ssize * sizeof(unsigned char));
//unsigned char sb;
char sb;
for (NodeID i = 0; i < ssize; ++i) {
ifs.read((char*)&(tt.sptc_sbv[i]), sizeof(tt.sptc_sbv[i]));
//ifs.read((char*)&sb, sizeof(sb));
// if(v < 10){
// bitset<8> s(tt.sptc_sbv[i]);
// cout << s;
//}
}
//if(v < 10)
// cout << endl;
//
}
ifs.read((char*)&isize, sizeof(isize));
r_numOfTokens = isize;
cout<< r_numOfTokens << " tokens in total." << endl;
r_tokenindex_p = (token_t*)memalign(64, r_numOfTokens * sizeof(token_t));
for (NodeID v = 0; v < r_numOfTokens; ++v) {
token_t& tt = r_tokenindex_p[v];
ifs.read((char*)&sid, sizeof(sid));
ifs.read((char*)&ssize, sizeof(ssize));
tt.sptc_v = (NodeID*)memalign(64, 1 * sizeof(NodeID));
tt.sptc_d = (EdgeWeight*)memalign(64, 1 * sizeof(EdgeWeight));
tt.sptc_v[0] = sid;
tt.sptc_d[0] = ssize;
fsize = r_supertokenindex_p[sid].sptc_d[0];
if(ssize == 0) continue;
//if(v < 10)
// cout << sid << "vs" << fsize << "vs" << ssize << endl;
tt.sptc_fbv = (unsigned char*)memalign(64, fsize * sizeof(unsigned char));
// unsigned char fb;
char fb;
for (NodeID i = 0; i < fsize; ++i) {
ifs.read((char*)&(tt.sptc_fbv[i]), sizeof(tt.sptc_fbv[i]));
//ifs.read((char*)&fb, sizeof(fb));
// if(v < 10){
// bitset<8> s(tt.sptc_fbv[i]);
// cout << s;
// }
}
//if(v < 10)
// cout << endl;
tt.sptc_sbv = (unsigned char*)memalign(64, ssize * sizeof(unsigned char));
//unsigned char sb;
char sb;
for (NodeID i = 0; i < ssize; ++i) {
ifs.read((char*)&(tt.sptc_sbv[i]), sizeof(tt.sptc_sbv[i]));
//ifs.read((char*)&sb, sizeof(sb));
// if(v < 10){
// bitset<8> s(tt.sptc_sbv[i]);
// cout << s;
//}
}
//if(v < 10)
// cout << endl;
//
}
cout << "loaded standard tokens" << endl;
ifs.close();
}
// Two-level (token-compressed) 2-hop distance query between s and t.
// ts is a per-query timestamp used to lazily invalidate the scratch arrays:
// dis_vec[x] is only valid when ts_vec[x] == ts, so the vectors never need
// clearing between queries. que / que_d are preallocated BFS queues used to
// expand nested tokens.
// Phase 1 expands the token tree hanging off s's anchor and stamps each
// reached hub's distance into dis_vec; phase 2 expands t's anchor and takes
// the minimum combined distance over hubs stamped in phase 1.
EdgeWeight query_p_two_level(NodeID s, NodeID t, long ts, vector<NodeID>& dis_vec, vector<long>& ts_vec, vector<NodeID>& que, vector<EdgeWeight>& que_d) {
	if(s==t) return 0;
	EdgeWeight distance = INF_WEIGHT;
	NodeID anchor_s = anchor_p[s];
	NodeID anchor_t = anchor_p[t];
	// BFS frontier: [que_t0, que_t1) is the current level, que_h the tail.
	NodeID que_t0 = 0, que_t1 = 0, que_h = 0;
	que_d[que_h] = 0;
	que[que_h++] = anchor_s;
	que_t1 = que_h;
	if(anchor_s < numOfVertices){
		// Anchor is a plain vertex: stamp it directly, nothing to expand.
		if(ts_vec[anchor_s] != ts){
			ts_vec[anchor_s] = ts;
			dis_vec[anchor_s] = 0;
		}
	}
	else{
		// Anchor is a token (id >= numOfVertices): BFS over nested tokens.
		for (; que_t0 < que_h;) {
			for (NodeID que_i = que_t0; que_i < que_t1; ++que_i) {
				NodeID tid = que[que_i];
				EdgeWeight tdis = que_d[que_i];
				const token_t& token_v = tokenindex_p[tid - numOfVertices];
				_mm_prefetch(&token_v.sptc_v[0], _MM_HINT_T0);
				_mm_prefetch(&token_v.sptc_d[0], _MM_HINT_T0);
				NodeID r = token_v.sptc_v[0];          // root hub of this token
				EdgeWeight ssize = token_v.sptc_d[0];  // #second-level bytes; only used by the disabled early-exit below
				token_t& supertoken_r = supertokenindex_p[r];
				EdgeWeight fsize = supertoken_r.sptc_d[0]; // #first-level bitmap bytes of r's supertoken
				// hashing, can be replaced by 1024 linear probing for efficiency.
				if(ts_vec[r] != ts){
					ts_vec[r] = ts;
					dis_vec[r] = tdis;
				}
				// Decode the two-level bitmap: set bit j of first-level byte i
				// consumes the next second-level byte from sptc_sbv; set bit k of
				// that byte selects child (i*8+j)*8+k+1 of the supertoken arrays
				// (bits are tested MSB-first, hence the 7 - j / 7 - k indexing).
				EdgeWeight spos = 0;
				for(EdgeWeight i = 0; i < fsize; ++i){
					unsigned char fmask = token_v.sptc_fbv[i];
					bitset<8> fbs(fmask);
					for(NodeID j = 0; j < 8; ++j){
						if(fbs[ 7 - j]){
							unsigned char smask = token_v.sptc_sbv[spos++];
							bitset<8> sbs(smask);
							for(NodeID k = 0; k < 8; ++k){
								if(sbs[7 - k]){
									NodeID w = supertoken_r.sptc_v[ (i * 8 + j) * 8 + k + 1];
									EdgeWeight w_d = supertoken_r.sptc_d[(i * 8 + j) * 8 + k + 1] + tdis;
									if( w < numOfVertices){// vertex child: stamp its distance. hashing, can be replaced by 1024 linear probing for efficiency.
										if(ts_vec[w] != ts){
											ts_vec[w] = ts;
											dis_vec[w] = w_d;
										}
									}else{
										// Token child: enqueue for the next BFS level.
										que_d[que_h] = w_d;
										que[que_h++] = w;
									}
								}
							}
							//if(spos == ssize) break;
						}
					}
					//if(spos == ssize) break;
				}
			}
			que_t0 = que_t1;
			que_t1 = que_h;
		}
	}
	// Phase 2: expand t's side and combine with the stamps left by phase 1.
	que_t0 = 0, que_t1 = 0, que_h = 0;
	que_d[que_h] = 0;
	que[que_h++] = anchor_t;
	if(anchor_t < numOfVertices){
		if(ts_vec[anchor_t] == ts){
			EdgeWeight current_dis = dis_vec[anchor_t] + 0;
			if(current_dis < distance)
				distance = current_dis;
		}
	}else{
		que_t1 = que_h;
		for (; que_t0 < que_h;) {
			for (NodeID que_i = que_t0; que_i < que_t1; ++que_i) {
				NodeID tid = que[que_i];
				EdgeWeight tdis = que_d[que_i];
				const token_t& token_v = tokenindex_p[tid - numOfVertices];
				_mm_prefetch(&token_v.sptc_v[0], _MM_HINT_T0);
				_mm_prefetch(&token_v.sptc_d[0], _MM_HINT_T0);
				NodeID r = token_v.sptc_v[0];
				EdgeWeight ssize = token_v.sptc_d[0];
				token_t& supertoken_r = supertokenindex_p[r];
				EdgeWeight fsize = supertoken_r.sptc_d[0];
				// hashing, can be replaced by 1024 linear probing for efficiency.
				if(ts_vec[r] == ts){
					// r was reached from s's side too: candidate meeting hub.
					EdgeWeight current_dis = dis_vec[r] + tdis;
					if(current_dis < distance)
						distance = current_dis;
				}
				EdgeWeight spos = 0;
				for(EdgeWeight i = 0; i < fsize; ++i){
					unsigned char fmask = token_v.sptc_fbv[i];
					bitset<8> fbs(fmask);
					for(NodeID j = 0; j < 8; ++j){
						if(fbs[7 - j]){
							unsigned char smask = token_v.sptc_sbv[spos++];
							bitset<8> sbs(smask);
							for(NodeID k = 0; k < 8; ++k){
								if(sbs[7 - k]){
									NodeID w = supertoken_r.sptc_v[ (i * 8 + j) * 8 + k + 1];
									EdgeWeight w_d = supertoken_r.sptc_d[(i * 8 + j) * 8 + k + 1] + tdis;
									if( w < numOfVertices){// vertex child: combine with phase-1 stamp. hashing, can be replaced by 1024 linear probing for efficiency.
										if(ts_vec[w] == ts){
											EdgeWeight current_dis = dis_vec[w] + w_d;
											if(current_dis < distance)
												distance = current_dis;
										}
									}else{
										que_d[que_h] = w_d;
										que[que_h++] = w;
									}
								}
							}
							//if(spos == ssize) break;
						}
					}
					//if(spos == ssize) break;
				}
			}
			que_t0 = que_t1;
			que_t1 = que_h;
		}
	}
	return distance;
}
// Directed variant of query_p_two_level(): the forward (s) phase uses the
// forward anchors/tokens (anchor_p / tokenindex_p / supertokenindex_p) while
// the backward (t) phase uses the reverse structures (r_anchor_p /
// r_tokenindex_p / r_supertokenindex_p), giving a forward-distance query on
// a directed graph. Scratch-array timestamp scheme is identical to the
// undirected version: dis_vec[x] is valid only when ts_vec[x] == ts.
EdgeWeight query_p_two_level_d(NodeID s, NodeID t, long ts, vector<NodeID>& dis_vec, vector<long>& ts_vec, vector<NodeID>& que, vector<EdgeWeight>& que_d) {
	if(s==t) return 0;
	EdgeWeight distance = INF_WEIGHT;
	NodeID anchor_s = anchor_p[s];
	NodeID anchor_t = r_anchor_p[t];   // reverse-label anchor for the target
	NodeID que_t0 = 0, que_t1 = 0, que_h = 0;
	que_d[que_h] = 0;
	que[que_h++] = anchor_s;
	que_t1 = que_h;
	if(anchor_s < numOfVertices){
		// Anchor is a plain vertex: stamp it directly.
		if(ts_vec[anchor_s] != ts){
			ts_vec[anchor_s] = ts;
			dis_vec[anchor_s] = 0;
		}
	}
	else{
		// Phase 1: BFS over forward tokens, stamping reached hubs.
		for (; que_t0 < que_h;) {
			for (NodeID que_i = que_t0; que_i < que_t1; ++que_i) {
				NodeID tid = que[que_i];
				EdgeWeight tdis = que_d[que_i];
				const token_t& token_v = tokenindex_p[tid - numOfVertices];
				_mm_prefetch(&token_v.sptc_v[0], _MM_HINT_T0);
				_mm_prefetch(&token_v.sptc_d[0], _MM_HINT_T0);
				NodeID r = token_v.sptc_v[0];          // root hub of this token
				EdgeWeight ssize = token_v.sptc_d[0];  // #second-level bytes; only used by the disabled early-exit below
				token_t& supertoken_r = supertokenindex_p[r];
				EdgeWeight fsize = supertoken_r.sptc_d[0];
				// hashing, can be replaced by 1024 linear probing for efficiency.
				if(ts_vec[r] != ts){
					ts_vec[r] = ts;
					dis_vec[r] = tdis;
				}
				// Two-level bitmap decode; see query_p_two_level() for layout.
				EdgeWeight spos = 0;
				for(EdgeWeight i = 0; i < fsize; ++i){
					unsigned char fmask = token_v.sptc_fbv[i];
					bitset<8> fbs(fmask);
					for(NodeID j = 0; j < 8; ++j){
						if(fbs[ 7 - j]){
							unsigned char smask = token_v.sptc_sbv[spos++];
							bitset<8> sbs(smask);
							for(NodeID k = 0; k < 8; ++k){
								if(sbs[7 - k]){
									NodeID w = supertoken_r.sptc_v[ (i * 8 + j) * 8 + k + 1];
									EdgeWeight w_d = supertoken_r.sptc_d[(i * 8 + j) * 8 + k + 1] + tdis;
									if( w < numOfVertices){// hashing, can be replaced by 1024 linear probing for efficiency.
										if(ts_vec[w] != ts){
											ts_vec[w] = ts;
											dis_vec[w] = w_d;
										}
									}else{
										que_d[que_h] = w_d;
										que[que_h++] = w;
									}
								}
							}
							//if(spos == ssize) break;
						}
					}
					//if(spos == ssize) break;
				}
			}
			que_t0 = que_t1;
			que_t1 = que_h;
		}
	}
	// Phase 2: expand t's side through the REVERSE token structures and
	// combine with the forward stamps.
	que_t0 = 0, que_t1 = 0, que_h = 0;
	que_d[que_h] = 0;
	que[que_h++] = anchor_t;
	if(anchor_t < numOfVertices){
		if(ts_vec[anchor_t] == ts){
			EdgeWeight current_dis = dis_vec[anchor_t] + 0;
			if(current_dis < distance)
				distance = current_dis;
		}
	}else{
		que_t1 = que_h;
		for (; que_t0 < que_h;) {
			for (NodeID que_i = que_t0; que_i < que_t1; ++que_i) {
				NodeID tid = que[que_i];
				EdgeWeight tdis = que_d[que_i];
				const token_t& token_v = r_tokenindex_p[tid - numOfVertices];
				_mm_prefetch(&token_v.sptc_v[0], _MM_HINT_T0);
				_mm_prefetch(&token_v.sptc_d[0], _MM_HINT_T0);
				NodeID r = token_v.sptc_v[0];
				EdgeWeight ssize = token_v.sptc_d[0];
				token_t& supertoken_r = r_supertokenindex_p[r];
				EdgeWeight fsize = supertoken_r.sptc_d[0];
				// hashing, can be replaced by 1024 linear probing for efficiency.
				if(ts_vec[r] == ts){
					EdgeWeight current_dis = dis_vec[r] + tdis;
					if(current_dis < distance)
						distance = current_dis;
				}
				EdgeWeight spos = 0;
				for(EdgeWeight i = 0; i < fsize; ++i){
					unsigned char fmask = token_v.sptc_fbv[i];
					bitset<8> fbs(fmask);
					for(NodeID j = 0; j < 8; ++j){
						if(fbs[7 - j]){
							unsigned char smask = token_v.sptc_sbv[spos++];
							bitset<8> sbs(smask);
							for(NodeID k = 0; k < 8; ++k){
								if(sbs[7 - k]){
									NodeID w = supertoken_r.sptc_v[ (i * 8 + j) * 8 + k + 1];
									EdgeWeight w_d = supertoken_r.sptc_d[(i * 8 + j) * 8 + k + 1] + tdis;
									if( w < numOfVertices){// hashing, can be replaced by 1024 linear probing for efficiency.
										if(ts_vec[w] == ts){
											EdgeWeight current_dis = dis_vec[w] + w_d;
											if(current_dis < distance)
												distance = current_dis;
										}
									}else{
										que_d[que_h] = w_d;
										que[que_h++] = w;
									}
								}
							}
							//if(spos == ssize) break;
						}
					}
					//if(spos == ssize) break;
				}
			}
			que_t0 = que_t1;
			que_t1 = que_h;
		}
	}
	return distance;
}
};
class Label {
public:
vector<index_t> index_;
index_t_p* index_p;
two_index_t_p* two_index_p;
// Wall-clock time in seconds with microsecond resolution (gettimeofday).
double GetCurrentTimeSec() {
	timeval now;
	gettimeofday(&now, NULL);
	return now.tv_sec + now.tv_usec * 1e-6;
}
// Allocates one (initially empty) vector-based label per vertex.
// NOTE(review): reads the global numOfVertices, which must be set first.
Label() {
	index_.resize(numOfVertices);
}
// Releases the vector-based labels via Free(). The memalign()'d arrays
// behind index_p / two_index_p are NOT freed here — TODO confirm ownership.
~Label() {
	Free();
}
// Standard 2-hop query over the pointer-based labels: merge the sorted hub
// arrays of s and t and take the minimum combined distance. Both arrays end
// with the sentinel hub numOfVertices (paired with INF_WEIGHT).
EdgeWeight query_p(NodeID s, NodeID t) {
	const index_t_p &ls = index_p[s];
	const index_t_p &lt = index_p[t];
	_mm_prefetch(&ls.spt_v[0], _MM_HINT_T0);
	_mm_prefetch(&lt.spt_v[0], _MM_HINT_T0);
	_mm_prefetch(&ls.spt_d[0], _MM_HINT_T0);
	_mm_prefetch(&lt.spt_d[0], _MM_HINT_T0);
	EdgeWeight best = INF_WEIGHT;
	int i = 0, j = 0;
	while (true) {
		const NodeID hub_s = ls.spt_v[i];
		const NodeID hub_t = lt.spt_v[j];
		if (hub_s == numOfVertices) break; // sentinel reached on s's side
		if (hub_s == hub_t) {
			const EdgeWeight cand = ls.spt_d[i] + lt.spt_d[j];
			if (cand < best) best = cand;
			++i;
			++j;
		} else if (hub_s < hub_t) {
			++i;
		} else {
			++j;
		}
	}
	return best;
}
// Two-tier 2-hop query: tier 1 stores hubs with id < UCHAR_MAX packed as
// uint8_t (sentinel UCHAR_MAX), tier 2 the remaining hubs as full NodeIDs
// (sentinel numOfVertices). Both tiers are merged sequentially and the
// smaller of the two minima is returned.
// NOTE(review): the tier-1 cursors are uint8_t, so a tier-1 list is assumed
// to hold at most 255 entries before its sentinel — confirm this matches the
// convert_to_fewerbit() invariant.
EdgeWeight two_query_p_sequential(NodeID s, NodeID t) {
	EdgeWeight distance = INF_WEIGHT;   // best over tier-2 (large-id) hubs
	EdgeWeight ldistance = INF_WEIGHT;  // best over tier-1 (small-id) hubs
	const two_index_t_p &idx_s = two_index_p[s];
	const two_index_t_p &idx_t = two_index_p[t];
	_mm_prefetch(&idx_s.spt_lv[0], _MM_HINT_T0);
	_mm_prefetch(&idx_t.spt_lv[0], _MM_HINT_T0);
	_mm_prefetch(&idx_s.spt_ld[0], _MM_HINT_T0);
	_mm_prefetch(&idx_t.spt_ld[0], _MM_HINT_T0);
	// Tier 1: merge the packed 8-bit hub lists.
	for (uint8_t i = 0, j = 0; ; ) {
		uint8_t uv8_1 = idx_s.spt_lv[i], uv8_2 = idx_t.spt_lv[j];
		if (uv8_1 == UCHAR_MAX) break; // Sentinel
		if (uv8_1 == uv8_2) {
			EdgeWeight td = idx_s.spt_ld[i] + idx_t.spt_ld[j];
			if (td < ldistance) ldistance = td;
			++i;
			++j;
		}
		else {
			i += uv8_1 < uv8_2 ? 1 : 0;
			j += uv8_1 > uv8_2 ? 1 : 0;
		}
	}
	_mm_prefetch(&idx_s.spt_v[0], _MM_HINT_T0);
	_mm_prefetch(&idx_t.spt_v[0], _MM_HINT_T0);
	_mm_prefetch(&idx_s.spt_d[0], _MM_HINT_T0);
	_mm_prefetch(&idx_t.spt_d[0], _MM_HINT_T0);
	// Tier 2: merge the full-width hub lists.
	for (int i = 0, j = 0; ; ) {
		NodeID v1 = idx_s.spt_v[i], v2 = idx_t.spt_v[j];
		if (v1 == numOfVertices) break; // Sentinel
		if (v1 == v2) {
			EdgeWeight td = idx_s.spt_d[i] + idx_t.spt_d[j];
			if (td < distance) distance = td;
			++i;
			++j;
		}
		else {
			i += v1 < v2 ? 1 : 0;
			j += v1 > v2 ? 1 : 0;
		}
	}
	if(distance < ldistance)
		return distance;
	else
		return ldistance;
}
// Same two-tier query as two_query_p_sequential(), but the two tier merges
// run as independent OpenMP sections. Each section writes a distinct local
// (distance vs. ldistance), so there is no shared mutable state between
// them; the minima are combined after the parallel region joins.
EdgeWeight two_query_p_parallel(NodeID s, NodeID t) {
	EdgeWeight distance = INF_WEIGHT;   // tier-2 minimum (section 1)
	EdgeWeight ldistance = INF_WEIGHT;  // tier-1 minimum (section 2)
	const two_index_t_p &idx_s = two_index_p[s];
	const two_index_t_p &idx_t = two_index_p[t];
#pragma omp parallel sections
	{
#pragma omp section
		{
			// Tier 2: full-width hub lists, sentinel numOfVertices.
			_mm_prefetch(&idx_s.spt_v[0], _MM_HINT_T0);
			_mm_prefetch(&idx_t.spt_v[0], _MM_HINT_T0);
			_mm_prefetch(&idx_s.spt_d[0], _MM_HINT_T0);
			_mm_prefetch(&idx_t.spt_d[0], _MM_HINT_T0);
			for (int i = 0, j = 0; ; ) {
				NodeID v1 = idx_s.spt_v[i], v2 = idx_t.spt_v[j];
				if (v1 == numOfVertices) break; // Sentinel
				if (v1 == v2) {
					EdgeWeight td = idx_s.spt_d[i] + idx_t.spt_d[j];
					if (td < distance) distance = td;
					++i;
					++j;
				}
				else {
					i += v1 < v2 ? 1 : 0;
					j += v1 > v2 ? 1 : 0;
				}
			}
		}
#pragma omp section
		{
			// Tier 1: packed 8-bit hub lists, sentinel UCHAR_MAX.
			_mm_prefetch(&idx_s.spt_lv[0], _MM_HINT_T0);
			_mm_prefetch(&idx_t.spt_lv[0], _MM_HINT_T0);
			_mm_prefetch(&idx_s.spt_ld[0], _MM_HINT_T0);
			_mm_prefetch(&idx_t.spt_ld[0], _MM_HINT_T0);
			for (uint8_t i = 0, j = 0; ; ) {
				uint8_t uv8_1 = idx_s.spt_lv[i], uv8_2 = idx_t.spt_lv[j];
				if (uv8_1 == UCHAR_MAX) break; // Sentinel
				if (uv8_1 == uv8_2) {
					EdgeWeight td = idx_s.spt_ld[i] + idx_t.spt_ld[j];
					if (td < ldistance) ldistance = td;
					++i;
					++j;
				}
				else {
					i += uv8_1 < uv8_2 ? 1 : 0;
					j += uv8_1 > uv8_2 ? 1 : 0;
				}
			}
		}
	}
	if(distance < ldistance)
		return distance;
	else
		return ldistance;
}
// 2-hop query truncated to (approximately) the first k label entries per
// side: the merge aborts as soon as either cursor has advanced past
// position k, trading accuracy for speed.
EdgeWeight query_p_with_nums(NodeID s, NodeID t, int k) {
	const index_t_p &idx_s = index_p[s];
	const index_t_p &idx_t = index_p[t];
	_mm_prefetch(&idx_s.spt_v[0], _MM_HINT_T0);
	_mm_prefetch(&idx_t.spt_v[0], _MM_HINT_T0);
	_mm_prefetch(&idx_s.spt_d[0], _MM_HINT_T0);
	_mm_prefetch(&idx_t.spt_d[0], _MM_HINT_T0);
	EdgeWeight best = INF_WEIGHT;
	int i = 0, j = 0;
	for (;;) {
		const NodeID hub_s = idx_s.spt_v[i];
		const NodeID hub_t = idx_t.spt_v[j];
		if (hub_s == numOfVertices) break; // sentinel
		if (hub_s == hub_t) {
			const EdgeWeight cand = idx_s.spt_d[i] + idx_t.spt_d[j];
			if (cand < best) best = cand;
			++i;
			++j;
		} else if (hub_s < hub_t) {
			++i;
		} else {
			++j;
		}
		if (i > k || j > k) break; // exceeded the per-side entry budget
	}
	return best;
}
// 2-hop query over the vector-based labels (no sentinel; bounded by size()).
// Returns INF_WEIGHT when s and t share no hub.
// Fix: the original loop condition "i < index_s.size(), j < index_t.size()"
// used the comma operator, so the first bound was discarded and i could run
// past the end of index_s (out-of-bounds read). Both bounds are now checked.
EdgeWeight query(NodeID s, NodeID t) {
	EdgeWeight distance = INF_WEIGHT;
	vector<NodeID>& index_s = index_[s].spt_v;
	vector<EdgeWeight>& index_s_d = index_[s].spt_d;
	vector<NodeID>& index_t = index_[t].spt_v;
	vector<EdgeWeight>& index_t_d = index_[t].spt_d;
	for (size_t i = 0, j = 0; i < index_s.size() && j < index_t.size(); ) {
		if (index_s[i] == index_t[j])
			distance = min(distance, (EdgeWeight)(index_s_d[i++] + index_t_d[j++]));
		else {
			if (index_s[i] < index_t[j])
				++i;
			else
				++j;
		}
	}
	return distance;
}
// 2-hop query that also reports the best meeting hub (meet) and the two hub
// distances (dis1 from s, dis2 from t). Outputs are set to max() sentinels
// when no common hub exists.
// Fix: the original loop condition used the comma operator, discarding
// "i < index_s.size()" and allowing an out-of-bounds read on index_s.
EdgeWeight query(NodeID s, NodeID t, NodeID& meet, EdgeWeight& dis1, EdgeWeight& dis2) {
	EdgeWeight distance = INF_WEIGHT;
	vector<NodeID>& index_s = index_[s].spt_v;
	vector<EdgeWeight>& index_s_d = index_[s].spt_d;
	vector<NodeID>& index_t = index_[t].spt_v;
	vector<EdgeWeight>& index_t_d = index_[t].spt_d;
	meet = numeric_limits<NodeID>::max();
	dis1 = numeric_limits<EdgeWeight>::max();
	dis2 = numeric_limits<EdgeWeight>::max();
	for (size_t i = 0, j = 0; i < index_s.size() && j < index_t.size(); ) {
		if (index_s[i] == index_t[j]) {
			if (distance > (EdgeWeight)(index_s_d[i] + index_t_d[j])) {
				distance = (EdgeWeight)(index_s_d[i] + index_t_d[j]);
				meet = index_s[i];
				dis1 = index_s_d[i];
				dis2 = index_t_d[j];
			}
			++i; ++j;
		}
		else {
			if (index_s[i] < index_t[j])
				++i;
			else
				++j;
		}
	}
	return distance;
}
/*EdgeWeight query_new(NodeID s, NodeID t, Ordering& ordering) {
EdgeWeight distance = INF_WEIGHT;
vector<NodeID>& index_s = index_[s].spt_v;
vector<EdgeWeight>& index_s_d = index_[s].spt_d;
vector<NodeID>& index_t = index_[t].spt_v;
vector<EdgeWeight>& index_t_d = index_[t].spt_d;
for (int i = 0, j = 0; i < index_s.size(), j < index_t.size(); ) {
if (index_s[i] == index_t[j])
distance = min(distance, (EdgeWeight)(index_s_d[i++] + index_t_d[j++]));
else {
if (index_s[i] < index_t[j])
++i;
else
++j;
}
}
return distance;
}
*/
// Average label size per vertex, minus 1 so the trailing sentinel / trivial
// entry is not counted. Prefers the vector-based labels (index_) when they
// are populated; otherwise walks the pointer-based labels (index_p) until
// each list's numOfVertices sentinel.
double avg_size() {
	double total = 0;
	if(index_.size()!=0){
		for (int i = 0; i < numOfVertices; ++i) total += index_[i].spt_v.size();
		double avg = total / numOfVertices - 1; // We do not count the trivial label (V, INF_WEIGHT).
		return avg;
	}
	total = 0;
	for (int i = 0; i < numOfVertices; ++i) {
		int unit_count = 0;
		const index_t_p &idx_s = index_p[i];
		for(int j = 0; ;){
			NodeID v = idx_s.spt_v[j++];
			++unit_count;                 // counts the sentinel entry as well
			if( v == numOfVertices) break;
		}
		total += unit_count;
	}
	double avg = total / numOfVertices - 1; // We do not count the trivial label (V, INF_WEIGHT).
	return avg;
}
/*
NodeID max_size() {
NodeID maxsize = numeric_limits<NodeID>::min();
for (int i = 0; i < V; ++i) maxsize = max(maxsize, index_[i].spt_v.size());
return maxsize;
}*/
// Appends hub `root` with distance `distance` to vertex v's label.
void append(NodeID v, NodeID root, EdgeWeight distance) {
	auto &label = index_[v];
	label.spt_v.push_back(root);
	label.spt_d.push_back(distance);
}
// Prints the average label size (see avg_size()) to stdout.
void print_stat() {
	cout << "Average_label_size: " << avg_size() << endl;
	//cout << "Maximum Label Size: " << max_size() << endl;
}
// Clears every vector-based label and then the label table itself.
// No-op when the table is already empty.
void Free() {
	if (index_.empty()) return;
	for (auto &label : index_) {
		label.spt_v.clear();
		label.spt_d.clear();
	}
	index_.clear();
}
// Serializes the vector-based labels in binary form:
//   numOfVertices, then per vertex: entry count, then interleaved
//   (hub id, distance) pairs — the layout load_labels() reads back.
void save_labels(const char* save_filename) {
	ofstream ofs(save_filename, ios::binary | ios::out);
	ofs.write((const char*)&numOfVertices, sizeof(numOfVertices));
	for (NodeID v = 0; v < numOfVertices; ++v) {
		NodeID isize = index_[v].size();
		ofs.write((const char*)&isize, sizeof(isize));
		for (NodeID i = 0; i < index_[v].size(); ++i) {
			ofs.write((const char*)&index_[v].spt_v[i], sizeof(index_[v].spt_v[i]));
			ofs.write((const char*)&index_[v].spt_d[i], sizeof(index_[v].spt_d[i]));
		}
	}
	ofs.close();
}
// Loads pointer-based labels from the binary layout written by
// save_labels(): vertex count, then per vertex the entry count followed by
// (hub, distance) pairs. Overwrites the GLOBAL numOfVertices with the value
// from the file.
// NOTE(review): any previously allocated index_p is dropped without free()
// (leak on reload); the stream is opened without ios::binary (fine on POSIX,
// confirm for Windows); read errors are not checked.
void load_labels(const char* load_filename) {
	/* for (NodeID v = 0; v < numOfVertices; ++v) {
	free(index_p[v].spt_v);
	free(index_p[v].spt_d);
	}
	*/
	//free(index_p);
	index_p = NULL;
	ifstream ifs(load_filename);
	NodeID isize = 0;
	ifs.read((char*)&isize, sizeof(isize));
	numOfVertices = isize;
	// One cache-line-aligned label record per vertex.
	index_p = (index_t_p*)memalign(64, numOfVertices * sizeof(index_t_p));
	for (NodeID v = 0; v < numOfVertices; ++v) {
		index_t_p &idx = index_p[v];
		ifs.read((char*)&isize, sizeof(isize));
		idx.spt_v = (NodeID*)memalign(64, isize * sizeof(NodeID));
		idx.spt_d = (EdgeWeight*)memalign(64, isize * sizeof(EdgeWeight));
		// index_[v].spt_v.resize(isize);
		// index_[v].spt_d.resize(isize);
		for (NodeID i = 0; i < isize; ++i) {
			NodeID hub;
			EdgeWeight hub_weight;
			ifs.read((char*)&hub, sizeof(hub));
			ifs.read((char*)&hub_weight, sizeof(hub_weight));
			//index_[v].spt_v[i] = hub;
			//index_[v].spt_d[i] = hub_weight;
			idx.spt_v[i] = hub;
			idx.spt_d[i] = hub_weight;
		}
	}
	ifs.close();
	/*
	index_.clear();
	ifstream ifs(load_filename);
	NodeID isize = 0;
	ifs.read((char*)&isize, sizeof(isize));
	numOfVertices = isize;
	index_.resize(numOfVertices);
	for (NodeID v = 0; v < numOfVertices; ++v) {
	ifs.read((char*)&isize, sizeof(isize));
	index_[v].spt_v.resize(isize);
	index_[v].spt_d.resize(isize);
	for (NodeID i = 0; i < index_[v].size(); ++i) {
	NodeID hub;
	EdgeWeight hub_weight;
	ifs.read((char*)&hub, sizeof(hub));
	ifs.read((char*)&hub_weight, sizeof(hub_weight));
	index_[v].spt_v[i] = hub;
	index_[v].spt_d[i] = hub_weight;
	}
	}
	ifs.close();
	*/
}
// Splits each pointer-based label (index_p) into the compact two-tier form
// (two_index_p): leading hubs with id < UCHAR_MAX are re-stored as uint8_t
// (tier terminated by UCHAR_MAX / INF_WEIGHT); the remaining hubs keep full
// NodeID width, including the numOfVertices sentinel already present in the
// input. Prints an estimate of the space saved.
// Assumes index_p labels are sorted by hub id, so small-id hubs come first.
// Fix: removed two uninitialized, never-used locals (hub / hub_weight) that
// were declared inside both copy loops in the original.
void convert_to_fewerbit(){
	two_index_p = NULL;
	two_index_p = (two_index_t_p*)memalign(64, numOfVertices * sizeof(two_index_t_p));
	double compressed_size = 0;
	double total_size = 0;
	for (NodeID v = 0; v < numOfVertices; ++v) {
		two_index_t_p &idx = two_index_p[v];
		index_t_p &idx_original = index_p[v];
		// Count the leading hubs small enough for the 8-bit tier.
		NodeID isize = 0;
		for(NodeID i = 0; idx_original.spt_v[i] < UCHAR_MAX; ++i){
			++isize;
		}
		idx.spt_lv = (uint8_t*)memalign(64, (isize + 1) * sizeof(uint8_t));
		idx.spt_ld = (EdgeWeight*)memalign(64, (isize + 1) * sizeof(EdgeWeight));
		for (NodeID i = 0; i < isize; ++i) {
			idx.spt_lv[i] = idx_original.spt_v[i]; // narrowing is safe: id < UCHAR_MAX
			idx.spt_ld[i] = idx_original.spt_d[i];
		}
		// Bytes saved: 4-byte ids shrink to 1 byte each.
		// NOTE(review): the "- 1" term mirrors the sentinel bookkeeping — confirm formula.
		compressed_size += 4 * (isize - 1) - isize;
		idx.spt_lv[isize] = UCHAR_MAX;  // tier-1 sentinel
		idx.spt_ld[isize] = INF_WEIGHT;
		// Remaining large-id hubs; +1 keeps room for the numOfVertices sentinel.
		NodeID larger_size = 0;
		for(NodeID i = isize; idx_original.spt_v[i] != numOfVertices; ++i){
			++larger_size;
		}
		larger_size++;
		idx.spt_v = (NodeID*)memalign(64, larger_size * sizeof(NodeID));
		idx.spt_d = (EdgeWeight*)memalign(64, larger_size * sizeof(EdgeWeight));
		for (NodeID i = 0; i < larger_size; ++i) {
			idx.spt_v[i] = idx_original.spt_v[i + isize];
			idx.spt_d[i] = idx_original.spt_d[i + isize];
		}
		total_size += 4 * (isize - 1 + larger_size) * 2;
	}
	cout << "reduce size :" << compressed_size << " out of " << total_size << " saving " << int(compressed_size * 100 / total_size) << "%" << endl;
}
// Loads labels as load_labels() does, but truncates every label to at most
// k entries: the first k-1 hubs are kept and slot k-1 is overwritten with
// the (numOfVertices, INF_WEIGHT) sentinel. The remaining pairs are still
// read from the stream so the file position stays in sync.
// Fix: the original skip test was "if (i > actual_isize) continue;", which
// let i == actual_isize fall through and write idx.spt_v[actual_isize] /
// idx.spt_d[actual_isize] — one element past the memalign'd buffers
// whenever isize > k. The test is now ">=".
void load_labels_with_k(const char* load_filename, int k) {
	long total_amount = 0;   // label entries present in the file
	long actual_amount = 0;  // label entries actually kept after truncation
	index_p = NULL;          // NOTE(review): previous index_p is leaked, as in load_labels()
	ifstream ifs(load_filename);
	NodeID isize = 0;
	ifs.read((char*)&isize, sizeof(isize));
	numOfVertices = isize;   // overwrites the global vertex count
	index_p = (index_t_p*)memalign(64, numOfVertices * sizeof(index_t_p));
	for (NodeID v = 0; v < numOfVertices; ++v) {
		index_t_p &idx = index_p[v];
		ifs.read((char*)&isize, sizeof(isize));
		int actual_isize = k;
		if (isize > k) actual_isize = k;
		else actual_isize = isize;
		total_amount += isize;
		actual_amount += actual_isize;
		idx.spt_v = (NodeID*)memalign(64, actual_isize * sizeof(NodeID));
		idx.spt_d = (EdgeWeight*)memalign(64, actual_isize * sizeof(EdgeWeight));
		for (NodeID i = 0; i < isize; ++i) {
			NodeID hub;
			EdgeWeight hub_weight;
			ifs.read((char*)&hub, sizeof(hub));
			ifs.read((char*)&hub_weight, sizeof(hub_weight));
			if (i >= actual_isize) continue; // past the kept prefix: consume but discard
			if (i == actual_isize - 1) {
				// Last kept slot always carries the sentinel pair.
				idx.spt_v[i] = numOfVertices;
				idx.spt_d[i] = INF_WEIGHT;
			}else {
				idx.spt_v[i] = hub;
				idx.spt_d[i] = hub_weight;
			}
		}
	}
	ifs.close();
	cout << "Total Labels:" << total_amount << endl;
	cout << "Actual Labels:" << actual_amount << endl;
}
// Writes, one number per line, how many vertex labels each hub appears in
// (a hub-popularity histogram over the vector-based labels).
void save_labels_iteration_stats(const char* save_filename) {
	vector<NodeID> stat(numOfVertices);
	for (NodeID v = 0; v < numOfVertices; ++v) {
		for (NodeID i = 0; i < index_[v].size(); ++i) {
			++stat[index_[v].spt_v[i]];
		}
	}
	ofstream ofs(save_filename);
	for (NodeID v = 0; v < numOfVertices; ++v) {
		ofs << stat[v] << endl;
	}
	ofs.close();
}
// Vector-label query that additionally records profiling data in q_info:
// the best meeting hub, the elapsed time, and the shorter label length.
// Fixes two bugs in the original:
//  - the loop condition used the comma operator, so "i < index_s.size()"
//    was discarded and i could read past the end of index_s;
//  - q_info.meet_node was read as index_s[i] AFTER i had been incremented,
//    recording the wrong (possibly out-of-range) hub.
EdgeWeight query_with_info(NodeID s, NodeID t, query_info& q_info) {
	double stime = GetCurrentTimeSec();
	EdgeWeight distance = INF_WEIGHT;
	vector<NodeID>& index_s = index_[s].spt_v;
	vector<EdgeWeight>& index_s_d = index_[s].spt_d;
	vector<NodeID>& index_t = index_[t].spt_v;
	vector<EdgeWeight>& index_t_d = index_[t].spt_d;
	q_info.meet_node = numOfVertices; // sentinel: no meeting hub found
	double meet_distance;
	for (size_t i = 0, j = 0; i < index_s.size() && j < index_t.size(); ) {
		if (index_s[i] == index_t[j]) {
			meet_distance = (EdgeWeight)(index_s_d[i] + index_t_d[j]);
			if (distance > meet_distance) {
				distance = meet_distance;
				q_info.meet_node = index_s[i]; // record hub before advancing
			}
			++i;
			++j;
		}
		else {
			if (index_s[i] < index_t[j])
				++i;
			else
				++j;
		}
	}
	stime = GetCurrentTimeSec() - stime;
	q_info.time_cost = stime;
	if (index_s.size() < index_t.size())
		q_info.search_len = index_s.size();
	else
		q_info.search_len = index_t.size();
	return distance;
}
};
class PLabel {
public:
vector<index_t_path> index_;
index_t_path_p* index_p;
// Current wall-clock time in fractional seconds (gettimeofday-based).
double GetCurrentTimeSec() {
	timeval stamp;
	gettimeofday(&stamp, NULL);
	return stamp.tv_sec + stamp.tv_usec * 1e-6;
}
// Allocates one (initially empty) path-label per vertex.
// NOTE(review): reads the global numOfVertices, which must be set first.
PLabel() {
	index_.resize(numOfVertices);
}
// Releases the vector-based labels via Free(). The memalign()'d index_p
// arrays are NOT freed here — TODO confirm ownership.
~PLabel() {
	Free();
}
// 2-hop distance query over the pointer-based path labels: merges the
// sorted, sentinel-terminated (numOfVertices) hub arrays of s and t and
// returns the minimum combined distance (INF_WEIGHT if no hub is shared).
// Fix: removed an unused, uninitialized local ("NodeID meet;") and the dead
// commented-out implementation; the merge itself is unchanged.
EdgeWeight query_p(NodeID s, NodeID t) {
	EdgeWeight distance = INF_WEIGHT;
	const index_t_path_p &idx_s = index_p[s];
	const index_t_path_p &idx_t = index_p[t];
	_mm_prefetch(&idx_s.spt_v[0], _MM_HINT_T0);
	_mm_prefetch(&idx_t.spt_v[0], _MM_HINT_T0);
	_mm_prefetch(&idx_s.spt_d[0], _MM_HINT_T0);
	_mm_prefetch(&idx_t.spt_d[0], _MM_HINT_T0);
	for (int i = 0, j = 0; ; ) {
		NodeID v1 = idx_s.spt_v[i], v2 = idx_t.spt_v[j];
		if (v1 == numOfVertices) break; // Sentinel
		if (v1 == v2) {
			EdgeWeight td = idx_s.spt_d[i] + idx_t.spt_d[j];
			if (td < distance) {
				distance = td;
			}
			++i;
			++j;
		}
		else {
			i += v1 < v2 ? 1 : 0;
			j += v1 > v2 ? 1 : 0;
		}
	}
	return distance;
}
// 2-hop query over the vector-based labels (no sentinel; bounded by size()).
// Fix: the original loop condition used the comma operator, so the
// "i < index_s.size()" test was discarded and i could read past the end of
// index_s. Both bounds are now checked.
EdgeWeight query(NodeID s, NodeID t) {
	EdgeWeight distance = INF_WEIGHT;
	vector<NodeID>& index_s = index_[s].spt_v;
	vector<EdgeWeight>& index_s_d = index_[s].spt_d;
	vector<NodeID>& index_t = index_[t].spt_v;
	vector<EdgeWeight>& index_t_d = index_[t].spt_d;
	for (size_t i = 0, j = 0; i < index_s.size() && j < index_t.size(); ) {
		if (index_s[i] == index_t[j])
			distance = min(distance, (EdgeWeight)(index_s_d[i++] + index_t_d[j++]));
		else {
			if (index_s[i] < index_t[j])
				++i;
			else
				++j;
		}
	}
	return distance;
}
// 2-hop query that also reports the best meeting hub and both hub
// distances; outputs hold max() sentinels when no common hub exists.
// Fix: the original loop condition used the comma operator, discarding the
// "i < index_s.size()" bound (possible out-of-bounds read on index_s).
EdgeWeight query(NodeID s, NodeID t, NodeID& meet, EdgeWeight& dis1, EdgeWeight& dis2) {
	EdgeWeight distance = INF_WEIGHT;
	vector<NodeID>& index_s = index_[s].spt_v;
	vector<EdgeWeight>& index_s_d = index_[s].spt_d;
	vector<NodeID>& index_t = index_[t].spt_v;
	vector<EdgeWeight>& index_t_d = index_[t].spt_d;
	meet = numeric_limits<NodeID>::max();
	dis1 = numeric_limits<EdgeWeight>::max();
	dis2 = numeric_limits<EdgeWeight>::max();
	for (size_t i = 0, j = 0; i < index_s.size() && j < index_t.size(); ) {
		if (index_s[i] == index_t[j]) {
			if (distance >(EdgeWeight)(index_s_d[i] + index_t_d[j])) {
				distance = (EdgeWeight)(index_s_d[i] + index_t_d[j]);
				meet = index_s[i];
				dis1 = index_s_d[i];
				dis2 = index_t_d[j];
			}
			++i; ++j;
		}
		else {
			if (index_s[i] < index_t[j])
				++i;
			else
				++j;
		}
	}
	return distance;
}
// Recovers a shortest s-t path through the best meeting hub and returns the
// number of recovered path vertices (s-side walk + t-side walk), NOT the
// shortest-path distance. `rank` is unused (kept for interface
// compatibility); `inv` maps a hub rank back to its vertex id.
// Fix: when s and t share no hub, the original left s_parent / t_parent
// uninitialized and indexed inv[numOfVertices] — undefined behavior. We now
// detect that case and return INF_WEIGHT. Also removed the unused
// `operation` counter and the large dead commented-out implementation.
EdgeWeight query_path(NodeID s, NodeID t, vector<NodeID>& rank, vector<NodeID>& inv) {
	EdgeWeight distance = INF_WEIGHT;
	NodeID meetnode = numOfVertices;
	NodeID s_parent = numOfVertices;
	NodeID t_parent = numOfVertices;
	const index_t_path_p &idx_s = index_p[s];
	const index_t_path_p &idx_t = index_p[t];
	_mm_prefetch(&idx_s.spt_v[0], _MM_HINT_T0);
	_mm_prefetch(&idx_t.spt_v[0], _MM_HINT_T0);
	_mm_prefetch(&idx_s.spt_d[0], _MM_HINT_T0);
	_mm_prefetch(&idx_t.spt_d[0], _MM_HINT_T0);
	_mm_prefetch(&idx_s.spt_p[0], _MM_HINT_T0);
	_mm_prefetch(&idx_t.spt_p[0], _MM_HINT_T0);
	// Merge the sorted hub lists, remembering the minimum-distance hub
	// together with each side's parent (predecessor) pointer.
	for (int i = 0, j = 0; ; ) {
		NodeID v1 = idx_s.spt_v[i], v2 = idx_t.spt_v[j];
		if (v1 == numOfVertices) break; // Sentinel
		if (v1 == v2) {
			EdgeWeight td = idx_s.spt_d[i] + idx_t.spt_d[j];
			if (td < distance) {
				distance = td;
				meetnode = v1;
				s_parent = idx_s.spt_p[i];
				t_parent = idx_t.spt_p[j];
			}
			++i;
			++j;
		}
		else {
			i += v1 < v2 ? 1 : 0;
			j += v1 > v2 ? 1 : 0;
		}
	}
	// No common hub: s and t are not connected through the labels.
	if (meetnode == numOfVertices) return INF_WEIGHT;
	// Walk parent pointers from both endpoints towards the meeting hub.
	vector<NodeID> path_from_s;
	vector<NodeID> path_to_t;
	path_from_s.push_back(s_parent);
	path_to_t.push_back(t_parent);
	NodeID inv_meetnode = inv[meetnode];
	while (path_from_s.back() != inv_meetnode) {
		const index_t_path_p &idx_from_s = index_p[path_from_s.back()];
		_mm_prefetch(&idx_from_s.spt_v[0], _MM_HINT_T0);
		_mm_prefetch(&idx_from_s.spt_p[0], _MM_HINT_T0);
		// Find meetnode in the current vertex's label; its parent pointer is
		// the next vertex on the path.
		for (int i = 0; ; ++i) {
			if (idx_from_s.spt_v[i] == numOfVertices) break;
			if (idx_from_s.spt_v[i] == meetnode) {
				path_from_s.push_back(idx_from_s.spt_p[i]);
				break;
			}
		}
	}
	while (path_to_t.back() != inv_meetnode) {
		const index_t_path_p &idx_to_t = index_p[path_to_t.back()];
		_mm_prefetch(&idx_to_t.spt_v[0], _MM_HINT_T0);
		_mm_prefetch(&idx_to_t.spt_p[0], _MM_HINT_T0);
		for (int i = 0; ; ++i) {
			if (idx_to_t.spt_v[i] == numOfVertices) break;
			if (idx_to_t.spt_v[i] == meetnode) {
				path_to_t.push_back(idx_to_t.spt_p[i]);
				break;
			}
		}
	}
	// Legacy behavior: report the recovered vertex count, not the distance.
	distance = 0;
	distance += path_from_s.size() + path_to_t.size();
	return distance;
}
// Like query_path(), but additionally re-derives the path length hop-by-hop
// with query_p() into `alldis` as a consistency check; the label distance
// itself is what gets returned. s and t are prepended to their walks when
// they differ from the meeting vertex.
// Fix: when s and t share no hub, the original used the uninitialized
// s_parent / t_parent and indexed inv[numOfVertices] — undefined behavior.
// We now return early (distance is still INF_WEIGHT, preserving the return
// value for that case).
EdgeWeight query_path_check(NodeID s, NodeID t, vector<NodeID>& rank, vector<NodeID>& inv) {
	EdgeWeight distance = INF_WEIGHT;
	NodeID meetnode = numOfVertices;
	NodeID s_parent = numOfVertices;
	NodeID t_parent = numOfVertices;
	const index_t_path_p &idx_s = index_p[s];
	const index_t_path_p &idx_t = index_p[t];
	_mm_prefetch(&idx_s.spt_v[0], _MM_HINT_T0);
	_mm_prefetch(&idx_t.spt_v[0], _MM_HINT_T0);
	_mm_prefetch(&idx_s.spt_d[0], _MM_HINT_T0);
	_mm_prefetch(&idx_t.spt_d[0], _MM_HINT_T0);
	_mm_prefetch(&idx_s.spt_p[0], _MM_HINT_T0);
	_mm_prefetch(&idx_t.spt_p[0], _MM_HINT_T0);
	// Merge the sorted hub lists, tracking the minimum-distance meeting hub
	// and each side's parent pointer.
	for (int i = 0, j = 0; ; ) {
		NodeID v1 = idx_s.spt_v[i], v2 = idx_t.spt_v[j];
		if (v1 == numOfVertices) break; // Sentinel
		if (v1 == v2) {
			EdgeWeight td = idx_s.spt_d[i] + idx_t.spt_d[j];
			if (td < distance) {
				distance = td;
				meetnode = v1;
				s_parent = idx_s.spt_p[i];
				t_parent = idx_t.spt_p[j];
			}
			++i;
			++j;
		}
		else {
			i += v1 < v2 ? 1 : 0;
			j += v1 > v2 ? 1 : 0;
		}
	}
	// No common hub: skip path reconstruction entirely.
	if (meetnode == numOfVertices) return distance;
	NodeID inv_meetnode = inv[meetnode];
	// Reconstruct both half-paths (endpoints included when distinct from
	// the meeting vertex).
	vector<NodeID> path_from_s;
	vector<NodeID> path_to_t;
	if(s !=inv_meetnode)
		path_from_s.push_back(s);
	path_from_s.push_back(s_parent);
	if (t != inv_meetnode)
		path_to_t.push_back(t);
	path_to_t.push_back(t_parent);
	while (path_from_s.back() != inv_meetnode) {
		const index_t_path_p &idx_from_s = index_p[path_from_s.back()];
		_mm_prefetch(&idx_from_s.spt_v[0], _MM_HINT_T0);
		_mm_prefetch(&idx_from_s.spt_p[0], _MM_HINT_T0);
		for (int i = 0; ; ++i) {
			if (idx_from_s.spt_v[i] == numOfVertices) break;
			if (idx_from_s.spt_v[i] == meetnode) {
				path_from_s.push_back(idx_from_s.spt_p[i]);
				break;
			}
		}
	}
	while (path_to_t.back() != inv_meetnode) {
		const index_t_path_p &idx_to_t = index_p[path_to_t.back()];
		_mm_prefetch(&idx_to_t.spt_v[0], _MM_HINT_T0);
		_mm_prefetch(&idx_to_t.spt_p[0], _MM_HINT_T0);
		for (int i = 0; ; ++i) {
			if (idx_to_t.spt_v[i] == numOfVertices) break;
			if (idx_to_t.spt_v[i] == meetnode) {
				path_to_t.push_back(idx_to_t.spt_p[i]);
				break;
			}
		}
	}
	// Consistency check: sum the per-edge distances along both walks.
	EdgeWeight alldis = 0;
	if (path_from_s.size() == 1)
		if (s != inv_meetnode)
			alldis += query_p(s, inv_meetnode);
	if (path_to_t.size() == 1)
		if (t != inv_meetnode)
			alldis += query_p(t, inv_meetnode);
	for (size_t i = 0; i + 1 < path_from_s.size(); ++i) {
		alldis += query_p(path_from_s[i], path_from_s[i + 1]);
	}
	for (size_t i = 0; i + 1 < path_to_t.size(); ++i) {
		alldis += query_p(path_to_t[i], path_to_t[i + 1]);
	}
	(void)alldis; // kept for debugging comparisons against `distance`
	return distance;
}
//EdgeWeight query_path_check(NodeID s, NodeID t, vector<NodeID>& rank, vector<NodeID>& inv) {
// EdgeWeight distance = INF_WEIGHT;
// NodeID meetnode = numOfVertices;
// NodeID s_parent;
// NodeID t_parent;
// const index_t_path_p &idx_s = index_p[s];
// const index_t_path_p &idx_t = index_p[t];
// _mm_prefetch(&idx_s.spt_v[0], _MM_HINT_T0);
// _mm_prefetch(&idx_t.spt_v[0], _MM_HINT_T0);
// _mm_prefetch(&idx_s.spt_d[0], _MM_HINT_T0);
// _mm_prefetch(&idx_t.spt_d[0], _MM_HINT_T0);
// _mm_prefetch(&idx_s.spt_p[0], _MM_HINT_T0);
// _mm_prefetch(&idx_t.spt_p[0], _MM_HINT_T0);
// for (int i = 0, j = 0; ; ) {
// NodeID v1 = idx_s.spt_v[i], v2 = idx_t.spt_v[j];
// if (v1 == numOfVertices) break; // Sentinel
// if (v1 == v2) {
// EdgeWeight td = idx_s.spt_d[i] + idx_t.spt_d[j];
// if (td < distance) {
// distance = td;
// if (v1 < meetnode) {
// meetnode = v1;
// s_parent = idx_s.spt_p[i];
// t_parent = idx_t.spt_p[j];
// }
// }
// ++i;
// ++j;
// }
// else {
// i += v1 < v2 ? 1 : 0;
// j += v1 > v2 ? 1 : 0;
// }
// }
// //Next, retrieve path from s - meetnode and meetnode - t.
// vector<NodeID> path_from_s;
// vector<NodeID> path_to_t;
// path_from_s.push_back(s_parent);
// path_to_t.push_back(t_parent);
// /* if (s == 194569 && t == 20072)
// cout << "debug." << " meet: " << meetnode << " sparent:" << s_parent << " tparent:" << t_parent << endl;*/
// NodeID inv_meetnode = inv[meetnode];
// while (path_from_s.back() != inv_meetnode) {
// /*if (s == 194569 && t == 20072)
// cout << "s meet:" << path_from_s.back() << endl;*/
// const index_t_path_p &idx_from_s = index_p[path_from_s.back()];
// _mm_prefetch(&idx_from_s.spt_v[0], _MM_HINT_T0);
// _mm_prefetch(&idx_from_s.spt_p[0], _MM_HINT_T0);
// // vector<NodeID>& index_from_s = index_[path_from_s.back()].spt_v;
// for (int i = 0; ; ++i) {
// if (idx_from_s.spt_v[i] == numOfVertices) break;
// if (idx_from_s.spt_v[i] == meetnode) {
// path_from_s.push_back(idx_from_s.spt_p[i]);
// break;
// }
// }
// }
// while (path_to_t.back() != inv_meetnode) {
// /*if (s == 194569 && t == 20072)
// cout << "t meet:" << path_to_t.back() << endl;*/
// // vector<NodeID>& index_to_t = index_[path_to_t.back()].spt_v;
// const index_t_path_p &idx_to_t = index_p[path_to_t.back()];
// _mm_prefetch(&idx_to_t.spt_v[0], _MM_HINT_T0);
// _mm_prefetch(&idx_to_t.spt_p[0], _MM_HINT_T0);
// for (int i = 0; ; ++i) {
// if (idx_to_t.spt_v[i] == numOfVertices) break;
// if (idx_to_t.spt_v[i] == meetnode) {
// path_to_t.push_back(idx_to_t.spt_p[i]);
// break;
// }
// }
// }
// EdgeWeight path_from_s = 0;
// for (int i = 0; i < path_from_s.size(); ++i) {
// }
//
// return distance;
//
//}
/*EdgeWeight query_new(NodeID s, NodeID t, Ordering& ordering) {
EdgeWeight distance = INF_WEIGHT;
vector<NodeID>& index_s = index_[s].spt_v;
vector<EdgeWeight>& index_s_d = index_[s].spt_d;
vector<NodeID>& index_t = index_[t].spt_v;
vector<EdgeWeight>& index_t_d = index_[t].spt_d;
for (int i = 0, j = 0; i < index_s.size(), j < index_t.size(); ) {
if (index_s[i] == index_t[j])
distance = min(distance, (EdgeWeight)(index_s_d[i++] + index_t_d[j++]));
else {
if (index_s[i] < index_t[j])
++i;
else
++j;
}
}
return distance;
}
*/
double avg_size() {
// Average number of label entries per vertex.
double label_total = 0;
for (int v = 0; v < numOfVertices; ++v) {
label_total += index_[v].spt_v.size();
}
// Subtract one: the trivial sentinel label (V, INF_WEIGHT) is not counted.
return label_total / numOfVertices - 1;
}
/*
NodeID max_size() {
NodeID maxsize = numeric_limits<NodeID>::min();
for (int i = 0; i < V; ++i) maxsize = max(maxsize, index_[i].spt_v.size());
return maxsize;
}*/
// Append hub `root` at distance `distance` to vertex v's label.
void append(NodeID v, NodeID root, EdgeWeight distance) {
auto &label = index_[v];
label.spt_v.push_back(root);
label.spt_d.push_back(distance);
}
// Print summary statistics of the label index to stdout.
void print_stat() {
const double avg = avg_size();
cout << "Average_label_size: " << avg << endl;
//cout << "Maximum Label Size: " << max_size() << endl;
}
// Release all per-vertex label storage.
void Free() {
if (index_.size() == 0) return; // never built -- nothing to do
for (int i = 0; i < numOfVertices; ++i) {
index_[i].spt_v.clear();
index_[i].spt_d.clear();
}
index_.clear();
}
void save_labels(const char* save_filename) {
ofstream ofs(save_filename, ios::binary | ios::out);
ofs.write((const char*)&numOfVertices, sizeof(numOfVertices));
for (NodeID v = 0; v < numOfVertices; ++v) {
NodeID isize = index_[v].size();
ofs.write((const char*)&isize, sizeof(isize));
for (NodeID i = 0; i < index_[v].size(); ++i) {
ofs.write((const char*)&index_[v].spt_v[i], sizeof(index_[v].spt_v[i]));
ofs.write((const char*)&index_[v].spt_p[i], sizeof(index_[v].spt_p[i]));
ofs.write((const char*)&index_[v].spt_d[i], sizeof(index_[v].spt_d[i]));
}
}
ofs.close();
}
// Deserialize labels written by save_labels(): numOfVertices, then per
// vertex a label count followed by (hub, parent, distance) triples.
// The data is stored into the flat, 64-byte-aligned index_p arrays used
// by the pointer-based query routines.
void load_labels(const char* load_filename) {
index_p = NULL;
// BUG FIX: save_labels() writes the file with ios::binary, so it must be
// read back in binary mode too (text mode translates line endings on some
// platforms and corrupts the data). This also matches DPLabel::load_labels.
ifstream ifs(load_filename, ios::binary | ios::in);
NodeID isize = 0;
ifs.read((char*)&isize, sizeof(isize));
numOfVertices = isize;
index_p = (index_t_path_p*)memalign(64, numOfVertices * sizeof(index_t_path_p));
for (NodeID v = 0; v < numOfVertices; ++v) {
index_t_path_p &idx = index_p[v];
ifs.read((char*)&isize, sizeof(isize));
// One aligned array per field (structure-of-arrays layout).
idx.spt_v = (NodeID*)memalign(64, isize * sizeof(NodeID));
idx.spt_p = (NodeID*)memalign(64, isize * sizeof(NodeID));
idx.spt_d = (EdgeWeight*)memalign(64, isize * sizeof(EdgeWeight));
for (NodeID i = 0; i < isize; ++i) {
NodeID hub;
NodeID hub_parent;
EdgeWeight hub_weight;
ifs.read((char*)&hub, sizeof(hub));
ifs.read((char*)&hub_parent, sizeof(hub_parent));
ifs.read((char*)&hub_weight, sizeof(hub_weight));
idx.spt_v[i] = hub;
idx.spt_p[i] = hub_parent;
idx.spt_d[i] = hub_weight;
}
}
ifs.close();
}
void save_labels_iteration_stats(const char* save_filename) {
vector<NodeID> stat(numOfVertices);
for (NodeID v = 0; v < numOfVertices; ++v) {
for (NodeID i = 0; i < index_[v].size(); ++i)
stat[index_[v].spt_v[i]]++;
}
ofstream ofs(save_filename);
for (NodeID v = 0; v < numOfVertices; ++v) {
ofs << stat[v] << endl;
}
ofs.close();
}
// s-t distance query that additionally fills q_info with the meeting hub,
// the shorter label length scanned, and the elapsed time.
EdgeWeight query_with_info(NodeID s, NodeID t, query_info& q_info) {
double stime = GetCurrentTimeSec();
EdgeWeight distance = INF_WEIGHT;
vector<NodeID>& index_s = index_[s].spt_v;
vector<EdgeWeight>& index_s_d = index_[s].spt_d;
vector<NodeID>& index_t = index_[t].spt_v;
vector<EdgeWeight>& index_t_d = index_[t].spt_d;
q_info.meet_node = numOfVertices;
double meet_distance;
// BUG FIX: the original loop condition used the comma operator
// (i < size_s, j < size_t), which discards the first test, so index_s[i]
// could be read out of bounds; both bounds are now checked with &&.
for (size_t i = 0, j = 0; i < index_s.size() && j < index_t.size(); ) {
if (index_s[i] == index_t[j]) {
meet_distance = (EdgeWeight)(index_s_d[i] + index_t_d[j]);
if (distance > meet_distance) {
distance = meet_distance;
// BUG FIX: record the hub BEFORE advancing i; the original read
// index_s[i] after i++, reporting the wrong hub (or past-the-end).
q_info.meet_node = index_s[i];
}
++i;
++j;
}
else {
if (index_s[i] < index_t[j])
++i;
else
++j;
}
}
stime = GetCurrentTimeSec() - stime;
q_info.time_cost = stime;
if (index_s.size() < index_t.size())
q_info.search_len = index_s.size();
else
q_info.search_len = index_t.size();
return distance;
}
};
// Hub labeling for DIRECTED graphs: index_ (inherited) holds forward labels
// (hubs reachable FROM a vertex), bindex_ holds backward labels (hubs that
// reach the vertex). A query merge-scans s's forward vs. t's backward label.
class DLabel : public Label {
public:
vector<index_t> bindex_; // Backward labels.
index_t_p* bindex_p; // Flat, 64-byte-aligned backward labels (filled by load_labels).
two_index_t_p* b_two_index_p; // Compact 8-bit-hub backward variant (filled by convert_to_fewerbit).
DLabel() {
index_.resize(numOfVertices);
bindex_.resize(numOfVertices);
}
~DLabel() {
Free();
}
// Directed s->t distance: every hub present in both labels yields a
// candidate distance; the minimum is returned (INF_WEIGHT if none).
EdgeWeight query(NodeID s, NodeID t) {
EdgeWeight distance = INF_WEIGHT;
vector<NodeID>& index_s = index_[s].spt_v;
vector<EdgeWeight>& index_s_d = index_[s].spt_d;
vector<NodeID>& bindex_t = bindex_[t].spt_v;
vector<EdgeWeight>& bindex_t_d = bindex_[t].spt_d;
// NOTE(review): the comma operator makes only `j < bindex_t.size()` the
// effective loop test; `i` is never bounds-checked. This appears to rely
// on labels being sentinel-terminated -- confirm, or change to `&&`.
for (int i = 0, j = 0; i < index_s.size(), j < bindex_t.size(); ) {
if (index_s[i] == bindex_t[j]) {
distance = min(distance, (EdgeWeight)(index_s_d[i] + bindex_t_d[j]));
++i;
++j;
}
else {
if (index_s[i] < bindex_t[j])
++i;
else
++j;
}
}
return distance;
}
// As above, but also reports the best meeting hub and the two leg
// distances (s->meet in dis1, meet->t in dis2).
EdgeWeight query(NodeID s, NodeID t, NodeID& meet, EdgeWeight& dis1, EdgeWeight& dis2) {
EdgeWeight distance = INF_WEIGHT;
vector<NodeID>& index_s = index_[s].spt_v;
vector<EdgeWeight>& index_s_d = index_[s].spt_d;
vector<NodeID>& bindex_t = bindex_[t].spt_v;
vector<EdgeWeight>& bindex_t_d = bindex_[t].spt_d;
meet = numeric_limits<NodeID>::max();
dis1 = numeric_limits<EdgeWeight>::max();
dis2 = numeric_limits<EdgeWeight>::max();
// NOTE(review): same comma-operator loop condition as query() above.
for (int i = 0, j = 0; i < index_s.size(), j < bindex_t.size(); ) {
if (index_s[i] == bindex_t[j]) {
if (distance > (EdgeWeight)(index_s_d[i] + bindex_t_d[j])) {
distance = (EdgeWeight)(index_s_d[i] + bindex_t_d[j]);
meet = index_s[i];
dis1 = index_s_d[i];
dis2 = bindex_t_d[j];
}
++i;
++j;
}
else {
if (index_s[i] < bindex_t[j])
++i;
else
++j;
}
}
return distance;
}
// Pointer-array (cache-aligned) directed query; scans the
// sentinel-terminated flat labels built by load_labels().
inline EdgeWeight query_p(NodeID s, NodeID t) {
//EdgeWeight distance = INF_WEIGHT;
//
////const index_t_p &idx_s = index_p[s];
////const index_t_p &idx_t = bindex_p[t];
//NodeID *vs = index_p[s].spt_v;
//NodeID *vt = bindex_p[t].spt_v;
//EdgeWeight* ws = index_p[s].spt_d;
//EdgeWeight* wt = bindex_p[t].spt_d;
//_mm_prefetch(vs, _MM_HINT_T0);
//_mm_prefetch(vt, _MM_HINT_T0);
//_mm_prefetch(ws, _MM_HINT_T0);
//_mm_prefetch(wt, _MM_HINT_T0);
//for (unsigned i = 0, j = 0; ; ) {
// if (*(vs + i) == *(vt + j)) {
// if (*(vs + i) == numOfVertices) break; // Sentinel
// EdgeWeight td = *(ws + i) + *(wt + j);
// if (td < distance) distance = td;
// ++i;
// ++j;
// }
// else {
// i += *(vs + i) < *(vt + j) ? 1 : 0;
// j += *(vs + i) > *(vt + j) ? 1 : 0;
// }
//}
//return distance;
EdgeWeight distance = INF_WEIGHT;
const index_t_p &idx_s = index_p[s];
const index_t_p &idx_t = bindex_p[t];
// Prefetch both labels before the merge scan.
_mm_prefetch(&idx_s.spt_v[0], _MM_HINT_T0);
_mm_prefetch(&idx_t.spt_v[0], _MM_HINT_T0);
_mm_prefetch(&idx_s.spt_d[0], _MM_HINT_T0);
_mm_prefetch(&idx_t.spt_d[0], _MM_HINT_T0);
for (int i = 0, j = 0; ; ) {
NodeID v1 = idx_s.spt_v[i], v2 = idx_t.spt_v[j];
if (v1 == v2) {
if (v1 == numOfVertices) break; // Sentinel
EdgeWeight td = idx_s.spt_d[i] + idx_t.spt_d[j];
if (td < distance) distance = td;
++i;
++j;
}
else {
// Advance the side with the smaller hub ID (labels sorted by hub).
i += v1 < v2 ? 1 : 0;
j += v1 > v2 ? 1 : 0;
}
}
return distance;
}
// Timed directed query; also fills q_info (meet node, scan length, time).
EdgeWeight query_with_info(NodeID s, NodeID t, query_info& q_info) {
double stime = GetCurrentTimeSec();
EdgeWeight distance = INF_WEIGHT;
vector<NodeID>& index_s = index_[s].spt_v;
vector<EdgeWeight>& index_s_d = index_[s].spt_d;
// vector<NodeID>& index_t = index_[t].spt_v;
// vector<EdgeWeight>& index_t_d = index_[t].spt_d;
vector<NodeID>& bindex_t = bindex_[t].spt_v;
vector<EdgeWeight>& bindex_t_d = bindex_[t].spt_d;
q_info.meet_node = numOfVertices;
double meet_distance;
// NOTE(review): comma-operator loop condition, as in query() above.
for (int i = 0, j = 0; i < index_s.size(), j < bindex_t.size(); ) {
if (index_s[i] == bindex_t[j]) {
// NOTE(review): `bindex_t[j++]` adds a hub ID, not a distance --
// this looks like it should be `bindex_t_d[j++]`. Verify.
meet_distance = (EdgeWeight)(index_s_d[i++] + bindex_t[j++]);
if (distance > meet_distance) {
distance = meet_distance;
// NOTE(review): i was already incremented above, so this records
// the hub AFTER the meeting one (and may read past the end).
q_info.meet_node = index_s[i];
}
}
else {
if (index_s[i] < bindex_t[j])
++i;
else
++j;
}
};
stime = GetCurrentTimeSec() - stime;
q_info.time_cost = stime;
// search_len is the shorter of the two scanned labels.
if (index_s.size() < bindex_t.size())
q_info.search_len = index_s.size();
else
q_info.search_len = bindex_t.size();
return distance;
}
void append(NodeID v, NodeID root, EdgeWeight distance, bool forward) { // forward(backward) search from root to vertex v.
if (forward) { // forward search from root to vertex v, hence append (root, distance) to backward index of vertex v.
bindex_[v].spt_v.push_back(root);
bindex_[v].spt_d.push_back(distance);
}
else { // backward search from root to vertex v, hence append (root, distance) to forward index of vertex v.
index_[v].spt_v.push_back(root);
index_[v].spt_d.push_back(distance);
}
}
// Release forward and (for directed graphs) backward label storage.
void Free() {
if (index_.size() == 0 || bindex_.size() == 0) return;
for (int v = 0; v < numOfVertices; ++v) {
index_[v].spt_v.clear();
index_[v].spt_d.clear();
if (DIRECTED_FLAG == true) {
bindex_[v].spt_v.clear();
bindex_[v].spt_d.clear();
}
}
index_.clear();
bindex_.clear();
}
// Average label size over forward AND backward labels.
double avg_size() {
double total = 0;
for (int i = 0; i < numOfVertices; ++i) {
total += index_[i].spt_v.size() ;
total += bindex_[i].spt_v.size();
}
double avg = total / numOfVertices / 2 - 1; // We do not count the trivial labels (V, INF_WEIGHT).
return avg;
}
void print_stat() {
cout << "Average_label_size: " << avg_size() << endl;
//cout << "Maximum Label Size: " << max_size() << endl;
}
// Binary layout: numOfVertices, then per vertex the forward label
// ((hub, distance) pairs preceded by a count) followed by the backward
// label in the same format.
void save_labels(const char* save_filename) {
ofstream ofs(save_filename, ios::binary | ios::out);
ofs.write((const char*)&numOfVertices, sizeof(numOfVertices));
for (NodeID v = 0; v < numOfVertices; ++v) {
int isize = index_[v].size();
ofs.write((const char*)&isize, sizeof(isize));
for (NodeID i = 0; i < index_[v].size(); ++i) {
ofs.write((const char*)&index_[v].spt_v[i], sizeof(index_[v].spt_v[i]));
ofs.write((const char*)&index_[v].spt_d[i], sizeof(index_[v].spt_d[i]));
}
int bisize = bindex_[v].size();
ofs.write((const char*)&bisize, sizeof(bisize));
for (NodeID i = 0; i < bindex_[v].size(); ++i) {
ofs.write((const char*)&bindex_[v].spt_v[i], sizeof(bindex_[v].spt_v[i]));
ofs.write((const char*)&bindex_[v].spt_d[i], sizeof(bindex_[v].spt_d[i]));
}
}
ofs.close();
}
// Load labels saved by save_labels() into the flat aligned arrays
// index_p / bindex_p used by query_p().
void load_labels(const char* load_filename) {
cout << "Loading Labels" << endl;
/*
for (NodeID v = 0; v < numOfVertices; ++v) {
free(index_p[v].spt_v);
free(index_p[v].spt_d);
}*/
//free(index_p);
index_p = NULL;
bindex_p = NULL;
// NOTE(review): save_labels() writes with ios::binary but this stream is
// opened in text mode -- should be ios::binary | ios::in (cf. DPLabel).
ifstream ifs(load_filename);
NodeID isize = 0;
ifs.read((char*)&isize, sizeof(isize));
numOfVertices = isize;
index_p = (index_t_p*)memalign(64, numOfVertices * sizeof(index_t_p));
bindex_p = (index_t_p*)memalign(64, numOfVertices * sizeof(index_t_p));
cout << numOfVertices << " vertices." << endl;
for (NodeID v = 0; v < numOfVertices; ++v) {
index_t_p &idx = index_p[v];
ifs.read((char*)&isize, sizeof(isize));
idx.spt_v = (NodeID*)memalign(64, isize * sizeof(NodeID));
idx.spt_d = (EdgeWeight*)memalign(64, isize * sizeof(EdgeWeight));
for (NodeID i = 0; i < isize; ++i) {
NodeID hub;
EdgeWeight hub_weight;
ifs.read((char*)&hub, sizeof(hub));
ifs.read((char*)&hub_weight, sizeof(hub_weight));
//index_[v].spt_v[i] = hub;
//index_[v].spt_d[i] = hub_weight;
idx.spt_v[i] = hub;
idx.spt_d[i] = hub_weight;
}
// index_[v].spt_v.resize(isize);
// index_[v].spt_d.resize(isize);
index_t_p &bidx = bindex_p[v];
ifs.read((char*)&isize, sizeof(isize));
bidx.spt_v = (NodeID*)memalign(64, isize * sizeof(NodeID));
bidx.spt_d = (EdgeWeight*)memalign(64, isize * sizeof(EdgeWeight));
for (NodeID i = 0; i < isize; ++i) {
NodeID hub;
EdgeWeight hub_weight;
ifs.read((char*)&hub, sizeof(hub));
ifs.read((char*)&hub_weight, sizeof(hub_weight));
//index_[v].spt_v[i] = hub;
//index_[v].spt_d[i] = hub_weight;
bidx.spt_v[i] = hub;
bidx.spt_d[i] = hub_weight;
}
}
ifs.close();
/*
index_.clear();
bindex_.clear();
ifs.open(load_filename, ios::binary | ios::in);
ifs.read((char*)&isize, sizeof(isize));
numOfVertices = isize;
index_.resize(numOfVertices);
bindex_.resize(numOfVertices);
for (NodeID v = 0; v < numOfVertices; ++v) {
ifs.read((char*)&isize, sizeof(isize));
index_[v].spt_v.resize(isize);
index_[v].spt_d.resize(isize);
for (NodeID i = 0; i < index_[v].size(); ++i) {
NodeID hub;
EdgeWeight hub_weight;
ifs.read((char*)&hub, sizeof(hub));
ifs.read((char*)&hub_weight, sizeof(hub_weight));
index_[v].spt_v[i] = hub;
index_[v].spt_d[i] = hub_weight;
}
ifs.read((char*)&isize, sizeof(isize));
bindex_[v].spt_v.resize(isize);
bindex_[v].spt_d.resize(isize);
for (NodeID i = 0; i < bindex_[v].size(); ++i) {
NodeID hub;
EdgeWeight hub_weight;
ifs.read((char*)&hub, sizeof(hub));
ifs.read((char*)&hub_weight, sizeof(hub_weight));
bindex_[v].spt_v[i] = hub;
bindex_[v].spt_d[i] = hub_weight;
}
}
ifs.close();
*/
/* for (int i = 0; i < numOfVertices; ++i) {
for (int j = 0; j < index_[i].size(); ++j)
if (index_[i].spt_v[j] != index_p[i].spt_v[j])
cout << "warning." << endl;
}*/
}
// Split each flat label into a "small hub" prefix (hub IDs < UCHAR_MAX,
// stored as uint8_t in spt_lv/spt_ld) and the remaining large-hub suffix.
// Assumes labels are sorted by hub ID so the small hubs form a prefix --
// TODO(review): confirm that invariant.
void convert_to_fewerbit(){
two_index_p = NULL;
b_two_index_p = NULL;
two_index_p = (two_index_t_p*)memalign(64, numOfVertices * sizeof(two_index_t_p));
b_two_index_p = (two_index_t_p*)memalign(64, numOfVertices * sizeof(two_index_t_p));
for (NodeID v = 0; v < numOfVertices; ++v) {
two_index_t_p &idx = two_index_p[v];
index_t_p &idx_original = index_p[v];
NodeID isize = 0;
// Count hubs representable in 8 bits.
for(NodeID i = 0; idx_original.spt_v[i] < UCHAR_MAX; ++i){
++isize;
}
idx.spt_lv = (uint8_t*)memalign(64, (isize + 1) * sizeof(uint8_t));
idx.spt_ld = (EdgeWeight*)memalign(64, (isize + 1) * sizeof(EdgeWeight));
// index_[v].spt_v.resize(isize);
// index_[v].spt_d.resize(isize);
for (NodeID i = 0; i < isize; ++i) {
uint8_t hub; // NOTE(review): unused local.
EdgeWeight hub_weight; // NOTE(review): unused local.
//index_[v].spt_v[i] = hub;
//index_[v].spt_d[i] = hub_weight;
idx.spt_lv[i] = idx_original.spt_v[i];
idx.spt_ld[i] = idx_original.spt_d[i];
}
// Sentinel terminating the 8-bit section.
idx.spt_lv[isize] = UCHAR_MAX;
idx.spt_ld[isize] = INF_WEIGHT;
NodeID larger_size = 0;
for(NodeID i = isize; idx_original.spt_v[i] != numOfVertices; ++i){
++larger_size;
}
idx.spt_v = (NodeID*)memalign(64, larger_size * sizeof(NodeID));
idx.spt_d = (EdgeWeight*)memalign(64, larger_size * sizeof(EdgeWeight));
for (NodeID i = 0; i < larger_size; ++i) {
uint8_t hub; // NOTE(review): unused local.
EdgeWeight hub_weight; // NOTE(review): unused local.
//index_[v].spt_v[i] = hub;
//index_[v].spt_d[i] = hub_weight;
idx.spt_v[i] = idx_original.spt_v[i + isize];
idx.spt_d[i] = idx_original.spt_d[i + isize];
}
// Same conversion for the backward label.
two_index_t_p &b_idx = b_two_index_p[v];
index_t_p &b_idx_original = bindex_p[v];
isize = 0;
for(NodeID i = 0; b_idx_original.spt_v[i] < UCHAR_MAX; ++i){
++isize;
}
b_idx.spt_lv = (uint8_t*)memalign(64, (isize + 1) * sizeof(uint8_t));
b_idx.spt_ld = (EdgeWeight*)memalign(64, (isize + 1) * sizeof(EdgeWeight));
// index_[v].spt_v.resize(isize);
// index_[v].spt_d.resize(isize);
for (NodeID i = 0; i < isize; ++i) {
uint8_t hub; // NOTE(review): unused local.
EdgeWeight hub_weight; // NOTE(review): unused local.
//index_[v].spt_v[i] = hub;
//index_[v].spt_d[i] = hub_weight;
b_idx.spt_lv[i] = b_idx_original.spt_v[i];
b_idx.spt_ld[i] = b_idx_original.spt_d[i];
}
b_idx.spt_lv[isize] = UCHAR_MAX;
b_idx.spt_ld[isize] = INF_WEIGHT;
larger_size = 0;
for(NodeID i = isize; b_idx_original.spt_v[i] != numOfVertices; ++i){
++larger_size;
}
b_idx.spt_v = (NodeID*)memalign(64, larger_size * sizeof(NodeID));
b_idx.spt_d = (EdgeWeight*)memalign(64, larger_size * sizeof(EdgeWeight));
for (NodeID i = 0; i < larger_size; ++i) {
uint8_t hub; // NOTE(review): unused local.
EdgeWeight hub_weight; // NOTE(review): unused local.
//index_[v].spt_v[i] = hub;
//index_[v].spt_d[i] = hub_weight;
b_idx.spt_v[i] = b_idx_original.spt_v[i + isize];
b_idx.spt_d[i] = b_idx_original.spt_d[i + isize];
}
}
}
// Write, one line per vertex, how many labels (forward + backward)
// reference that vertex as a hub.
void save_labels_iteration_stats(const char* save_filename) {
vector<NodeID> stat(numOfVertices);
for (NodeID v = 0; v < numOfVertices; ++v) {
for (NodeID i = 0; i < index_[v].size(); ++i)
stat[index_[v].spt_v[i]]++;
for (NodeID i = 0; i < bindex_[v].size(); ++i)
stat[bindex_[v].spt_v[i]]++;
}
ofstream ofs(save_filename);
for (NodeID v = 0; v < numOfVertices; ++v) {
ofs << stat[v] << endl;
}
ofs.close();
}
};
// Directed hub labeling WITH path support: each label entry additionally
// stores a parent (spt_p), so shortest paths can be reconstructed hop by
// hop through the meeting hub.
class DPLabel{
public:
vector<index_t_path> index_;
vector<index_t_path> bindex_; // Backward labels.
index_t_path_p* index_p; // Flat, 64-byte-aligned forward labels (filled by load_labels).
index_t_path_p* bindex_p; // Flat, 64-byte-aligned backward labels.
DPLabel() {
index_.resize(numOfVertices);
bindex_.resize(numOfVertices);
}
~DPLabel() {
Free();
}
// Directed s->t distance; as a side effect reconstructs the s->meet and
// meet->t parent chains (kept in local vectors). Returns the distance.
inline EdgeWeight query_path(NodeID s, NodeID t, vector<NodeID>& rank, vector<NodeID>& inv) {
EdgeWeight distance = INF_WEIGHT;
NodeID meetnode = numOfVertices;
// NOTE(review): s_parent/t_parent are only assigned when a common hub is
// found; if s and t share no hub they are used uninitialized below.
NodeID s_parent;
NodeID t_parent;
const index_t_path_p &idx_s = index_p[s];
const index_t_path_p &idx_t = bindex_p[t];
_mm_prefetch(&idx_s.spt_v[0], _MM_HINT_T0);
_mm_prefetch(&idx_t.spt_v[0], _MM_HINT_T0);
_mm_prefetch(&idx_s.spt_d[0], _MM_HINT_T0);
_mm_prefetch(&idx_t.spt_d[0], _MM_HINT_T0);
_mm_prefetch(&idx_s.spt_p[0], _MM_HINT_T0);
_mm_prefetch(&idx_t.spt_p[0], _MM_HINT_T0);
// Merge scan over the sentinel-terminated labels, tracking the best
// meeting hub and the parents on each side.
for (int i = 0, j = 0; ; ) {
NodeID v1 = idx_s.spt_v[i], v2 = idx_t.spt_v[j];
if (v1 == numOfVertices) break; // Sentinel
if (v1 == v2) {
EdgeWeight td = idx_s.spt_d[i] + idx_t.spt_d[j];
if (td < distance) {
distance = td;
//if (v1 < meetnode) {
meetnode = v1;
s_parent = idx_s.spt_p[i];
t_parent = idx_t.spt_p[j];
// }
}
++i;
++j;
}
else {
i += v1 < v2 ? 1 : 0;
j += v1 > v2 ? 1 : 0;
}
}
//Next, retrieve path from s - meetnode and meetnode - t.
vector<NodeID> path_from_s;
vector<NodeID> path_to_t;
path_from_s.push_back(s_parent);
path_to_t.push_back(t_parent);
/* if (s == 194569 && t == 20072)
cout << "debug." << " meet: " << meetnode << " sparent:" << s_parent << " tparent:" << t_parent << endl;*/
NodeID inv_meetnode = inv[meetnode];
// Walk forward labels from s, following parents toward the meet node.
while (path_from_s.back() != inv_meetnode) {
/*if (s == 194569 && t == 20072)
cout << "s meet:" << path_from_s.back() << endl;*/
const index_t_path_p &idx_from_s = index_p[path_from_s.back()];
_mm_prefetch(&idx_from_s.spt_v[0], _MM_HINT_T0);
_mm_prefetch(&idx_from_s.spt_p[0], _MM_HINT_T0);
// vector<NodeID>& index_from_s = index_[path_from_s.back()].spt_v;
for (int i = 0; ; ++i) {
if (idx_from_s.spt_v[i] == numOfVertices) break;
if (idx_from_s.spt_v[i] == meetnode) {
path_from_s.push_back(idx_from_s.spt_p[i]);
break;
}
}
}
// Walk backward labels from t toward the meet node.
while (path_to_t.back() != inv_meetnode) {
/*if (s == 194569 && t == 20072)
cout << "t meet:" << path_to_t.back() << endl;*/
// vector<NodeID>& index_to_t = index_[path_to_t.back()].spt_v;
const index_t_path_p &idx_to_t = bindex_p[path_to_t.back()];
_mm_prefetch(&idx_to_t.spt_v[0], _MM_HINT_T0);
_mm_prefetch(&idx_to_t.spt_p[0], _MM_HINT_T0);
for (int i = 0; ; ++i) {
if (idx_to_t.spt_v[i] == numOfVertices) break;
if (idx_to_t.spt_v[i] == meetnode) {
path_to_t.push_back(idx_to_t.spt_p[i]);
break;
}
}
}
return distance;
}
// Vector-label variant of path retrieval. NOTE: despite the name, this
// returns the combined hop count of the two reconstructed path halves,
// NOT the distance.
EdgeWeight query_path_p(NodeID s, NodeID t, vector<NodeID>& rank, vector<NodeID>& inv) {
EdgeWeight distance = INF_WEIGHT;
vector<NodeID>& index_s = index_[s].spt_v;
vector<EdgeWeight>& index_s_d = index_[s].spt_d;
vector<NodeID>& bindex_t = bindex_[t].spt_v;
vector<EdgeWeight>& bindex_t_d = bindex_[t].spt_d;
NodeID meetnode = numOfVertices;
// NOTE(review): uninitialized if no common hub is found (see query_path).
int s_parent;
int t_parent;
// NOTE(review): comma operator -- only the `j` bound is the effective
// loop condition; `i` is unchecked.
for (int i = 0, j = 0; i < index_s.size(), j < bindex_t.size(); ) {
if (index_s[i] == bindex_t[j]) {
if (distance >(EdgeWeight)(index_s_d[i] + bindex_t_d[j])) {
distance = (EdgeWeight)(index_s_d[i] + bindex_t_d[j]);
// if (index_s[i] < meetnode) {
meetnode = index_s[i];
s_parent = index_[s].spt_p[i];
// NOTE(review): j indexes t's BACKWARD label, but the parent is
// read from the forward label -- likely should be
// bindex_[t].spt_p[j]. Verify against query_path() above.
t_parent = index_[t].spt_p[j];
// }
}
//distance = min(distance, (EdgeWeight)(index_s_d[i] + bindex_t_d[j]));
++i;
++j;
}
else {
if (index_s[i] < bindex_t[j])
++i;
else
++j;
}
}
//Next, retrieve path from s - meetnode and meetnode - t.
vector<NodeID> path_from_s;
vector<NodeID> path_to_t;
path_from_s.push_back(s_parent);
path_to_t.push_back(t_parent);
/* if (s == 194569 && t == 20072)
cout << "debug." << " meet: " << meetnode << " sparent:" << s_parent << " tparent:" << t_parent << endl;*/
while (path_from_s.back() != inv[meetnode]) {
/*if (s == 194569 && t == 20072)
cout << "s meet:" << path_from_s.back() << endl;*/
vector<NodeID>& index_from_s = index_[path_from_s.back()].spt_v;
for (int i = 0; i < index_from_s.size(); ++i) {
if (index_from_s[i] == meetnode) {
path_from_s.push_back(index_[path_from_s.back()].spt_p[i]);
break;
}
}
}
while (path_to_t.back() != inv[meetnode]) {
/*if (s == 194569 && t == 20072)
cout << "t meet:" << path_to_t.back() << endl;*/
vector<NodeID>& index_to_t = bindex_[path_to_t.back()].spt_v;
for (int i = 0; i < index_to_t.size(); ++i) {
if (index_to_t[i] == meetnode) {
path_to_t.push_back(bindex_[path_to_t.back()].spt_p[i]);
break;
}
}
}
//for (int i = 0; i < path_from_s.size(); ++i)
//	path_from_s[i] = inv[path_from_s[i]];
//for (int i = 0; i < path_to_t.size(); ++i)
//	path_to_t[i] = inv[path_to_t[i]];
return path_from_s.size() + path_to_t.size();
}
// Release forward and (for directed graphs) backward label storage.
void Free() {
if (index_.size() == 0 || bindex_.size() == 0) return;
for (int v = 0; v < numOfVertices; ++v) {
index_[v].spt_v.clear();
index_[v].spt_d.clear();
if (DIRECTED_FLAG == true) {
bindex_[v].spt_v.clear();
bindex_[v].spt_d.clear();
}
}
index_.clear();
bindex_.clear();
}
// Average label size over forward AND backward labels.
double avg_size() {
double total = 0;
for (int i = 0; i < numOfVertices; ++i) {
total += index_[i].spt_v.size();
total += bindex_[i].spt_v.size();
}
double avg = total / numOfVertices / 2 - 1; // We do not count the trivial labels (V, INF_WEIGHT).
return avg;
}
void print_stat() {
cout << "Average_label_size: " << avg_size() << endl;
//cout << "Maximum Label Size: " << max_size() << endl;
}
// Binary layout: numOfVertices, then per vertex the forward label
// ((hub, parent, distance) triples preceded by a count) followed by the
// backward label in the same format.
void save_labels(const char* save_filename) {
ofstream ofs(save_filename, ios::binary | ios::out);
ofs.write((const char*)&numOfVertices, sizeof(numOfVertices));
for (NodeID v = 0; v < numOfVertices; ++v) {
int isize = index_[v].size();
ofs.write((const char*)&isize, sizeof(isize));
for (NodeID i = 0; i < index_[v].size(); ++i) {
ofs.write((const char*)&index_[v].spt_v[i], sizeof(index_[v].spt_v[i]));
ofs.write((const char*)&index_[v].spt_p[i], sizeof(index_[v].spt_p[i]));
ofs.write((const char*)&index_[v].spt_d[i], sizeof(index_[v].spt_d[i]));
}
int bisize = bindex_[v].size();
ofs.write((const char*)&bisize, sizeof(bisize));
for (NodeID i = 0; i < bindex_[v].size(); ++i) {
ofs.write((const char*)&bindex_[v].spt_v[i], sizeof(bindex_[v].spt_v[i]));
ofs.write((const char*)&bindex_[v].spt_p[i], sizeof(bindex_[v].spt_p[i]));
ofs.write((const char*)&bindex_[v].spt_d[i], sizeof(bindex_[v].spt_d[i]));
}
}
ofs.close();
}
// Load labels saved by save_labels() into the flat aligned arrays
// index_p / bindex_p used by the pointer-based queries.
void load_labels(const char* load_filename) {
index_p = NULL;
bindex_p = NULL;
ifstream ifs(load_filename, ios::binary | ios::in);
NodeID isize;
ifs.read((char*)&isize, sizeof(isize));
numOfVertices = isize;
index_p = (index_t_path_p*)memalign(64, numOfVertices * sizeof(index_t_path_p));
bindex_p = (index_t_path_p*)memalign(64, numOfVertices * sizeof(index_t_path_p));
cout << numOfVertices << " vertices." << endl;
for (NodeID v = 0; v < numOfVertices; ++v) {
index_t_path_p &idx = index_p[v];
ifs.read((char*)&isize, sizeof(isize));
idx.spt_v = (NodeID*)memalign(64, isize * sizeof(NodeID));
idx.spt_p = (NodeID*)memalign(64, isize * sizeof(NodeID));
idx.spt_d = (EdgeWeight*)memalign(64, isize * sizeof(EdgeWeight));
for (NodeID i = 0; i < isize; ++i) {
NodeID hub;
EdgeWeight hub_weight;
NodeID hub_parent;
ifs.read((char*)&hub, sizeof(hub));
ifs.read((char*)&hub_parent, sizeof(hub_parent));
ifs.read((char*)&hub_weight, sizeof(hub_weight));
//index_[v].spt_v[i] = hub;
//index_[v].spt_d[i] = hub_weight;
idx.spt_v[i] = hub;
idx.spt_d[i] = hub_weight;
idx.spt_p[i] = hub_parent;
}
// index_[v].spt_v.resize(isize);
// index_[v].spt_d.resize(isize);
index_t_path_p &bidx = bindex_p[v];
ifs.read((char*)&isize, sizeof(isize));
bidx.spt_v = (NodeID*)memalign(64, isize * sizeof(NodeID));
bidx.spt_d = (EdgeWeight*)memalign(64, isize * sizeof(EdgeWeight));
bidx.spt_p = (NodeID*)memalign(64, isize * sizeof(NodeID));
for (NodeID i = 0; i < isize; ++i) {
NodeID hub;
EdgeWeight hub_weight;
NodeID hub_parent;
ifs.read((char*)&hub, sizeof(hub));
ifs.read((char*)&hub_parent, sizeof(hub_parent));
ifs.read((char*)&hub_weight, sizeof(hub_weight));
//index_[v].spt_v[i] = hub;
//index_[v].spt_d[i] = hub_weight;
bidx.spt_v[i] = hub;
bidx.spt_d[i] = hub_weight;
bidx.spt_p[i] = hub_parent;
}
}
ifs.close();
/*index_.clear();
bindex_.clear();
ifstream ifs(load_filename, ios::binary | ios::in);
NodeID isize;
ifs.read((char*)&isize, sizeof(isize));
numOfVertices = isize;
index_.resize(numOfVertices);
bindex_.resize(numOfVertices);
for (NodeID v = 0; v < numOfVertices; ++v) {
ifs.read((char*)&isize, sizeof(isize));
index_[v].spt_v.resize(isize);
index_[v].spt_p.resize(isize);
index_[v].spt_d.resize(isize);
for (NodeID i = 0; i < index_[v].size(); ++i) {
NodeID hub;
NodeID parent;
EdgeWeight hub_weight;
ifs.read((char*)&hub, sizeof(hub));
ifs.read((char*)&parent, sizeof(parent));
ifs.read((char*)&hub_weight, sizeof(hub_weight));
index_[v].spt_v[i] = hub;
index_[v].spt_p[i] = parent;
index_[v].spt_d[i] = hub_weight;
}
ifs.read((char*)&isize, sizeof(isize));
bindex_[v].spt_v.resize(isize);
bindex_[v].spt_d.resize(isize);
for (NodeID i = 0; i < bindex_[v].size(); ++i) {
NodeID hub;
NodeID parent;
EdgeWeight hub_weight;
ifs.read((char*)&hub, sizeof(hub));
ifs.read((char*)&parent, sizeof(parent));
ifs.read((char*)&hub_weight, sizeof(hub_weight));
bindex_[v].spt_v[i] = hub;
bindex_[v].spt_p[i] = parent;
bindex_[v].spt_d[i] = hub_weight;
}
}
ifs.close();*/
}
// Distance-only directed query over the sentinel-terminated flat labels.
inline EdgeWeight query_p(NodeID s, NodeID t) {
//EdgeWeight distance = INF_WEIGHT;
//
////const index_t_p &idx_s = index_p[s];
////const index_t_p &idx_t = bindex_p[t];
//NodeID *vs = index_p[s].spt_v;
//NodeID *vt = bindex_p[t].spt_v;
//EdgeWeight* ws = index_p[s].spt_d;
//EdgeWeight* wt = bindex_p[t].spt_d;
//_mm_prefetch(vs, _MM_HINT_T0);
//_mm_prefetch(vt, _MM_HINT_T0);
//_mm_prefetch(ws, _MM_HINT_T0);
//_mm_prefetch(wt, _MM_HINT_T0);
//for (unsigned i = 0, j = 0; ; ) {
// if (*(vs + i) == *(vt + j)) {
// if (*(vs + i) == numOfVertices) break; // Sentinel
// EdgeWeight td = *(ws + i) + *(wt + j);
// if (td < distance) distance = td;
// ++i;
// ++j;
// }
// else {
// i += *(vs + i) < *(vt + j) ? 1 : 0;
// j += *(vs + i) > *(vt + j) ? 1 : 0;
// }
//}
//return distance;
EdgeWeight distance = INF_WEIGHT;
const index_t_path_p &idx_s = index_p[s];
const index_t_path_p &idx_t = bindex_p[t];
// Prefetch both labels before the merge scan.
_mm_prefetch(&idx_s.spt_v[0], _MM_HINT_T0);
_mm_prefetch(&idx_t.spt_v[0], _MM_HINT_T0);
_mm_prefetch(&idx_s.spt_d[0], _MM_HINT_T0);
_mm_prefetch(&idx_t.spt_d[0], _MM_HINT_T0);
for (int i = 0, j = 0; ; ) {
NodeID v1 = idx_s.spt_v[i], v2 = idx_t.spt_v[j];
if (v1 == v2) {
if (v1 == numOfVertices) break; // Sentinel
EdgeWeight td = idx_s.spt_d[i] + idx_t.spt_d[j];
if (td < distance) distance = td;
++i;
++j;
}
else {
// Advance the side with the smaller hub ID (labels sorted by hub).
i += v1 < v2 ? 1 : 0;
j += v1 > v2 ? 1 : 0;
}
}
return distance;
}
};
// Bit-parallel 2-hop (pruned landmark) label index for undirected graphs.
// Each vertex stores kNumBitParallelRoots bit-parallel root entries
// (a distance plus two 64-bit neighbor-set masks) and a hub list
// (spt_v/spt_d) terminated by a numOfVertices sentinel.
template<int kNumBitParallelRoots = 50>
class BPLabel {
public:
	index_t_bp<kNumBitParallelRoots>* index_bp;

	BPLabel() {
	}
	~BPLabel() {
		//Free();
	}

	// Point-to-point distance query: first tighten the bound with the
	// bit-parallel roots, then merge the two sorted hub lists.
	EdgeWeight query_p(NodeID s, NodeID t) {
		EdgeWeight distance = INF_WEIGHT;
		NodeID *vs = index_bp[s].spt_v;
		NodeID *vt = index_bp[t].spt_v;
		EdgeWeight* ws = index_bp[s].spt_d;
		EdgeWeight* wt = index_bp[t].spt_d;
		_mm_prefetch(vs, _MM_HINT_T0);
		_mm_prefetch(vt, _MM_HINT_T0);
		_mm_prefetch(ws, _MM_HINT_T0);
		_mm_prefetch(wt, _MM_HINT_T0);
		for (int i = 0; i < kNumBitParallelRoots; ++i) {
			EdgeWeight td = index_bp[s].bpspt_d[i] + index_bp[t].bpspt_d[i];
			if (td - 2 <= distance) {
				// -2 when the S0 masks intersect, -1 on an S0/S1 cross hit.
				td +=
					(index_bp[s].bpspt_s[i][0] & index_bp[t].bpspt_s[i][0]) ? -2 :
					((index_bp[s].bpspt_s[i][0] & index_bp[t].bpspt_s[i][1]) | (index_bp[s].bpspt_s[i][1] & index_bp[t].bpspt_s[i][0]))
					? -1 : 0;
				if (td < distance) distance = td;
			}
		}
		for (unsigned i = 0, j = 0; ; ) {
			if (*(vs + i) == *(vt + j)) {
				if (*(vs + i) == numOfVertices) break; // Sentinel
				EdgeWeight td = *(ws + i) + *(wt + j);
				if (td < distance) distance = td;
				++i;
				++j;
			}
			else {
				i += *(vs + i) < *(vt + j) ? 1 : 0;
				j += *(vs + i) > *(vt + j) ? 1 : 0;
			}
		}
		return distance;
	}
	// Same query, additionally reporting whether the winning bound came from
	// a bit-parallel root (isBP == true) or from an ordinary hub entry.
	EdgeWeight query_p(NodeID s, NodeID t, bool& isBP) {
		EdgeWeight distance = INF_WEIGHT;
		const index_t_bp<kNumBitParallelRoots> &idx_s = index_bp[s];
		const index_t_bp<kNumBitParallelRoots> &idx_t = index_bp[t];
		_mm_prefetch(&idx_s.spt_v[0], _MM_HINT_T0);
		_mm_prefetch(&idx_t.spt_v[0], _MM_HINT_T0);
		_mm_prefetch(&idx_s.spt_d[0], _MM_HINT_T0);
		_mm_prefetch(&idx_t.spt_d[0], _MM_HINT_T0);
		isBP = false;
		for (int i = 0; i < kNumBitParallelRoots; ++i) {
			EdgeWeight td = index_bp[s].bpspt_d[i] + index_bp[t].bpspt_d[i];
			if (td - 2 <= distance) {
				td +=
					(index_bp[s].bpspt_s[i][0] & index_bp[t].bpspt_s[i][0]) ? -2 :
					((index_bp[s].bpspt_s[i][0] & index_bp[t].bpspt_s[i][1]) | (index_bp[s].bpspt_s[i][1] & index_bp[t].bpspt_s[i][0]))
					? -1 : 0;
				if (td < distance) {
					distance = td;
					isBP = true;
				}
			}
		}
		for (int i = 0, j = 0; ; ) {
			NodeID v1 = idx_s.spt_v[i], v2 = idx_t.spt_v[j];
			if (v1 == numOfVertices) break; // Sentinel
			if (v1 == v2) {
				EdgeWeight td = idx_s.spt_d[i] + idx_t.spt_d[j];
				if (td < distance) {
					distance = td;
					isBP = false;
				}
				++i;
				++j;
			}
			else {
				i += v1 < v2 ? 1 : 0;
				j += v1 > v2 ? 1 : 0;
			}
		}
		return distance;
	}
	/*
	NodeID max_size() {
		NodeID maxsize = numeric_limits<NodeID>::min();
		for (int i = 0; i < V; ++i) maxsize = max(maxsize, index_[i].spt_v.size());
		return maxsize;
	}*/
	void print_stat() {
		cout << "Average_label_size: " << avg_size() << endl;
		//cout << "Maximum Label Size: " << max_size() << endl;
	}
	// Mean hub-list length per vertex, excluding the sentinel entry.
	double avg_size() {
		double lab_count = 0;
		for (NodeID v = 0; v < numOfVertices; ++v) {
			NodeID isize;
			for (isize = 1; index_bp[v].spt_v[isize - 1] != numOfVertices; ++isize) continue;
			lab_count += isize;
		}
		lab_count = (double)lab_count / (double)numOfVertices - 1;
		return lab_count;
	}
	void Free() {
		for (int v = 0; v < numOfVertices; ++v) {
			free(index_bp[v].spt_v);
			free(index_bp[v].spt_d);
		}
		free(index_bp);
		index_bp = NULL;
	}
	// Serialize the index: vertex count, root count, then per vertex the
	// bit-parallel entries followed by the sentinel-terminated hub list.
	void save_labels(const char* save_filename) {
		ofstream ofs(save_filename, ios::binary | ios::out);
		ofs.write((const char*)&numOfVertices, sizeof(numOfVertices));
		int knumbit = kNumBitParallelRoots;
		ofs.write((const char*)&knumbit, sizeof(knumbit));
		for (NodeID v = 0; v < numOfVertices; ++v) {
			index_t_bp<kNumBitParallelRoots> &idx = index_bp[v];
			for (int i = 0; i < kNumBitParallelRoots; ++i) {
				EdgeWeight d = idx.bpspt_d[i];
				uint64_t a = idx.bpspt_s[i][0];
				uint64_t b = idx.bpspt_s[i][1];
				ofs.write((const char*)&d, sizeof(d));
				ofs.write((const char*)&a, sizeof(a));
				ofs.write((const char*)&b, sizeof(b));
			}
			NodeID isize;
			for (isize = 1; idx.spt_v[isize - 1] != numOfVertices; ++isize) continue; // Find the sentinel
			ofs.write((const char*)&isize, sizeof(isize));
			for (NodeID i = 0; i < isize; ++i) {
				ofs.write((const char*)&idx.spt_v[i], sizeof(idx.spt_v[i]));
				ofs.write((const char*)&idx.spt_d[i], sizeof(idx.spt_d[i]));
			}
		}
		ofs.close();
	}
	// Inverse of save_labels(); aborts if the file was built with a
	// different number of bit-parallel roots.
	void load_labels(const char* load_filename){
		index_bp = NULL;
		int knumbit;
		// Fix: the file is written in binary mode, so it must be read in
		// binary mode too (matters on Windows where text mode mangles bytes).
		ifstream ifs(load_filename, ios::binary);
		NodeID isize = 0;
		ifs.read((char*)&isize, sizeof(isize));
		numOfVertices = isize;
		// Fix: read exactly sizeof(knumbit) bytes (was sizeof(isize), which
		// is only correct when NodeID happens to be the same width as int).
		ifs.read((char*)&knumbit, sizeof(knumbit));
		if (knumbit != kNumBitParallelRoots) {
			cout << knumbit << "!=" << kNumBitParallelRoots << endl;
			return;
		}
		index_bp = (index_t_bp<kNumBitParallelRoots>*)memalign(64, numOfVertices * sizeof(index_t_bp<kNumBitParallelRoots>));
		for (NodeID v = 0; v < numOfVertices; ++v) {
			index_t_bp<kNumBitParallelRoots> &idx = index_bp[v];
			for (int i = 0; i < kNumBitParallelRoots; ++i) {
				EdgeWeight d;
				uint64_t a, b;
				ifs.read((char*)&d, sizeof(EdgeWeight));
				ifs.read((char*)&a, sizeof(uint64_t));
				ifs.read((char*)&b, sizeof(uint64_t));
				idx.bpspt_d[i] = d;
				idx.bpspt_s[i][0] = a;
				idx.bpspt_s[i][1] = b;
			}
			ifs.read((char*)&isize, sizeof(isize));
			idx.spt_v = (NodeID*)memalign(64, isize * sizeof(NodeID));
			idx.spt_d = (EdgeWeight*)memalign(64, isize * sizeof(EdgeWeight));
			for (NodeID i = 0; i < isize; ++i) {
				NodeID hub;
				EdgeWeight hub_weight;
				ifs.read((char*)&hub, sizeof(hub));
				ifs.read((char*)&hub_weight, sizeof(hub_weight));
				idx.spt_v[i] = hub;
				idx.spt_d[i] = hub_weight;
			}
		}
		ifs.close();
	}
};
// Directed variant of BPLabel: index_bp holds forward labels (out-hubs of
// s), bindex_bp holds backward labels (in-hubs of t); a query merges a
// forward list against a backward list.
template<int kNumBitParallelRoots = 50>
class DBPLabel {
public:
	index_t_bp<kNumBitParallelRoots>* index_bp;
	index_t_bp<kNumBitParallelRoots>* bindex_bp;

	DBPLabel() {
	}
	~DBPLabel() {
	}
	/*
	NodeID max_size() {
		NodeID maxsize = numeric_limits<NodeID>::min();
		for (int i = 0; i < V; ++i) maxsize = max(maxsize, index_[i].spt_v.size());
		return maxsize;
	}*/
	// Directed s->t distance query: bit-parallel bound, then a sorted merge
	// of s's forward hub list with t's backward hub list.
	EdgeWeight query_p(NodeID s, NodeID t) {
		EdgeWeight distance = INF_WEIGHT;
		NodeID *vs = index_bp[s].spt_v;
		NodeID *vt = bindex_bp[t].spt_v;
		EdgeWeight* ws = index_bp[s].spt_d;
		EdgeWeight* wt = bindex_bp[t].spt_d;
		_mm_prefetch(vs, _MM_HINT_T0);
		_mm_prefetch(vt, _MM_HINT_T0);
		_mm_prefetch(ws, _MM_HINT_T0);
		_mm_prefetch(wt, _MM_HINT_T0);
		for (int i = 0; i < kNumBitParallelRoots; ++i) {
			EdgeWeight td = index_bp[s].bpspt_d[i] + bindex_bp[t].bpspt_d[i];
			if (td - 2 <= distance) {
				// -2 when the S0 masks intersect, -1 on an S0/S1 cross hit.
				td +=
					(index_bp[s].bpspt_s[i][0] & bindex_bp[t].bpspt_s[i][0]) ? -2 :
					((index_bp[s].bpspt_s[i][0] & bindex_bp[t].bpspt_s[i][1]) | (index_bp[s].bpspt_s[i][1] & bindex_bp[t].bpspt_s[i][0]))
					? -1 : 0;
				if (td < distance) distance = td;
			}
		}
		for (unsigned i = 0, j = 0; ; ) {
			if (*(vs + i) == *(vt + j)) {
				if (*(vs + i) == numOfVertices) break; // Sentinel
				EdgeWeight td = *(ws + i) + *(wt + j);
				if (td < distance) distance = td;
				++i;
				++j;
			}
			else {
				i += *(vs + i) < *(vt + j) ? 1 : 0;
				j += *(vs + i) > *(vt + j) ? 1 : 0;
			}
		}
		return distance;
	}
	// Same query, additionally reporting whether the winning bound came from
	// a bit-parallel root (isBP == true) or from an ordinary hub entry.
	EdgeWeight query_p(NodeID s, NodeID t, bool& isBP) {
		isBP = false;
		EdgeWeight distance = INF_WEIGHT;
		NodeID *vs = index_bp[s].spt_v;
		NodeID *vt = bindex_bp[t].spt_v;
		EdgeWeight* ws = index_bp[s].spt_d;
		EdgeWeight* wt = bindex_bp[t].spt_d;
		_mm_prefetch(vs, _MM_HINT_T0);
		_mm_prefetch(vt, _MM_HINT_T0);
		_mm_prefetch(ws, _MM_HINT_T0);
		_mm_prefetch(wt, _MM_HINT_T0);
		for (int i = 0; i < kNumBitParallelRoots; ++i) {
			EdgeWeight td = index_bp[s].bpspt_d[i] + bindex_bp[t].bpspt_d[i];
			if (td - 2 <= distance) {
				td +=
					(index_bp[s].bpspt_s[i][0] & bindex_bp[t].bpspt_s[i][0]) ? -2 :
					((index_bp[s].bpspt_s[i][0] & bindex_bp[t].bpspt_s[i][1]) | (index_bp[s].bpspt_s[i][1] & bindex_bp[t].bpspt_s[i][0]))
					? -1 : 0;
				if (td < distance) {
					distance = td;
					isBP = true;
				}
			}
		}
		for (unsigned i = 0, j = 0; ; ) {
			if (*(vs + i) == *(vt + j)) {
				if (*(vs + i) == numOfVertices) break; // Sentinel
				EdgeWeight td = *(ws + i) + *(wt + j);
				if (td < distance) {
					distance = td;
					isBP = false;
				}
				++i;
				++j;
			}
			else {
				i += *(vs + i) < *(vt + j) ? 1 : 0;
				j += *(vs + i) > *(vt + j) ? 1 : 0;
			}
		}
		return distance;
	}
	void print_stat() {
		cout << "Average_label_size: " << avg_size() << endl;
		//cout << "Maximum Label Size: " << max_size() << endl;
	}
	// Mean hub-list length per direction per vertex, excluding the sentinel.
	double avg_size() {
		double lab_count = 0;
		for (NodeID v = 0; v < numOfVertices; ++v) {
			NodeID isize;
			for (isize = 1; index_bp[v].spt_v[isize - 1] != numOfVertices; ++isize) continue;
			lab_count += isize;
			for (isize = 1; bindex_bp[v].spt_v[isize - 1] != numOfVertices; ++isize) continue;
			// Fix: the backward label length was computed but never added,
			// so the reported average ignored half the index.
			lab_count += isize;
		}
		// Two label sets per vertex; subtract 1 for the sentinel entry.
		lab_count = (double)lab_count / (double)numOfVertices / 2.0 - 1;
		return lab_count;
	}
	void Free() {
		for (int v = 0; v < numOfVertices; ++v) {
			free(index_bp[v].spt_v);
			free(index_bp[v].spt_d);
			free(bindex_bp[v].spt_v);
			free(bindex_bp[v].spt_d);
			// Fix: bpspt_d/bpspt_s are not heap-allocated (load_labels()
			// writes straight into them and BPLabel::Free() does not free
			// them either), so the previous free() calls on those members
			// were undefined behavior and have been removed.
		}
		free(index_bp);
		free(bindex_bp);
		index_bp = NULL;
		bindex_bp = NULL;
	}
	// Serialize both directions: per vertex the forward then backward
	// bit-parallel entries, then the forward then backward hub lists.
	void save_labels(const char* save_filename) {
		ofstream ofs(save_filename, ios::binary | ios::out);
		ofs.write((const char*)&numOfVertices, sizeof(numOfVertices));
		int knumbit = kNumBitParallelRoots;
		ofs.write((const char*)&knumbit, sizeof(knumbit));
		for (NodeID v = 0; v < numOfVertices; ++v) {
			index_t_bp<kNumBitParallelRoots> &idx = index_bp[v];
			index_t_bp<kNumBitParallelRoots> &r_idx = bindex_bp[v];
			for (int i = 0; i < kNumBitParallelRoots; ++i) {
				EdgeWeight d = idx.bpspt_d[i];
				uint64_t a = idx.bpspt_s[i][0];
				uint64_t b = idx.bpspt_s[i][1];
				ofs.write((const char*)&d, sizeof(d));
				ofs.write((const char*)&a, sizeof(a));
				ofs.write((const char*)&b, sizeof(b));
			}
			for (int i = 0; i < kNumBitParallelRoots; ++i) {
				EdgeWeight d = r_idx.bpspt_d[i];
				uint64_t a = r_idx.bpspt_s[i][0];
				uint64_t b = r_idx.bpspt_s[i][1];
				ofs.write((const char*)&d, sizeof(d));
				ofs.write((const char*)&a, sizeof(a));
				ofs.write((const char*)&b, sizeof(b));
			}
			NodeID isize;
			for (isize = 1; idx.spt_v[isize - 1] != numOfVertices; ++isize) continue; // Find the sentinel
			ofs.write((const char*)&isize, sizeof(isize));
			for (NodeID i = 0; i < isize; ++i) {
				ofs.write((const char*)&idx.spt_v[i], sizeof(idx.spt_v[i]));
				ofs.write((const char*)&idx.spt_d[i], sizeof(idx.spt_d[i]));
			}
			for (isize = 1; r_idx.spt_v[isize - 1] != numOfVertices; ++isize) continue; // Find the sentinel
			ofs.write((const char*)&isize, sizeof(isize));
			for (NodeID i = 0; i < isize; ++i) {
				ofs.write((const char*)&r_idx.spt_v[i], sizeof(r_idx.spt_v[i]));
				ofs.write((const char*)&r_idx.spt_d[i], sizeof(r_idx.spt_d[i]));
			}
		}
		ofs.close();
	}
	// Inverse of save_labels(); aborts if the file was built with a
	// different number of bit-parallel roots.
	void load_labels(const char* load_filename) {
		index_bp = NULL;
		bindex_bp = NULL;
		int knumbit;
		// Fix: the file is written in binary mode, so it must be read in
		// binary mode too (matters on Windows where text mode mangles bytes).
		ifstream ifs(load_filename, ios::binary);
		NodeID isize = 0;
		ifs.read((char*)&isize, sizeof(isize));
		numOfVertices = isize;
		// Fix: read exactly sizeof(knumbit) bytes (was sizeof(isize)).
		ifs.read((char*)&knumbit, sizeof(knumbit));
		if (knumbit != kNumBitParallelRoots) {
			cout << knumbit << "!=" << kNumBitParallelRoots << endl;
			return;
		}
		index_bp = (index_t_bp<kNumBitParallelRoots>*)memalign(64, numOfVertices * sizeof(index_t_bp<kNumBitParallelRoots>));
		bindex_bp = (index_t_bp<kNumBitParallelRoots>*)memalign(64, numOfVertices * sizeof(index_t_bp<kNumBitParallelRoots>));
		for (NodeID v = 0; v < numOfVertices; ++v) {
			index_t_bp<kNumBitParallelRoots> &idx = index_bp[v];
			index_t_bp<kNumBitParallelRoots> &r_idx = bindex_bp[v];
			for (int i = 0; i < kNumBitParallelRoots; ++i) {
				EdgeWeight d;
				uint64_t a, b;
				ifs.read((char*)&d, sizeof(EdgeWeight));
				ifs.read((char*)&a, sizeof(uint64_t));
				ifs.read((char*)&b, sizeof(uint64_t));
				idx.bpspt_d[i] = d;
				idx.bpspt_s[i][0] = a;
				idx.bpspt_s[i][1] = b;
			}
			for (int i = 0; i < kNumBitParallelRoots; ++i) {
				EdgeWeight d;
				uint64_t a, b;
				ifs.read((char*)&d, sizeof(EdgeWeight));
				ifs.read((char*)&a, sizeof(uint64_t));
				ifs.read((char*)&b, sizeof(uint64_t));
				r_idx.bpspt_d[i] = d;
				r_idx.bpspt_s[i][0] = a;
				r_idx.bpspt_s[i][1] = b;
			}
			ifs.read((char*)&isize, sizeof(isize));
			idx.spt_v = (NodeID*)memalign(64, isize * sizeof(NodeID));
			idx.spt_d = (EdgeWeight*)memalign(64, isize * sizeof(EdgeWeight));
			for (NodeID i = 0; i < isize; ++i) {
				NodeID hub;
				EdgeWeight hub_weight;
				ifs.read((char*)&hub, sizeof(hub));
				ifs.read((char*)&hub_weight, sizeof(hub_weight));
				idx.spt_v[i] = hub;
				idx.spt_d[i] = hub_weight;
			}
			ifs.read((char*)&isize, sizeof(isize));
			r_idx.spt_v = (NodeID*)memalign(64, isize * sizeof(NodeID));
			r_idx.spt_d = (EdgeWeight*)memalign(64, isize * sizeof(EdgeWeight));
			for (NodeID i = 0; i < isize; ++i) {
				NodeID hub;
				EdgeWeight hub_weight;
				ifs.read((char*)&hub, sizeof(hub));
				ifs.read((char*)&hub_weight, sizeof(hub_weight));
				r_idx.spt_v[i] = hub;
				r_idx.spt_d[i] = hub_weight;
			}
		}
		ifs.close();
	}
};
#endif |
statistic.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% SSSSS TTTTT AAA TTTTT IIIII SSSSS TTTTT IIIII CCCC %
% SS T A A T I SS T I C %
% SSS T AAAAA T I SSS T I C %
% SS T A A T I SS T I C %
% SSSSS T A A T IIIII SSSSS T IIIII CCCC %
% %
% %
% MagickCore Image Statistical Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/animate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/compress.h"
#include "MagickCore/constitute.h"
#include "MagickCore/display.h"
#include "MagickCore/draw.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/list.h"
#include "MagickCore/image-private.h"
#include "MagickCore/magic.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/timer.h"
#include "MagickCore/utility.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E v a l u a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EvaluateImage() applies a value to the image with an arithmetic, relational,
% or logical operator to an image. Use these operations to lighten or darken
% an image, to increase or decrease contrast in an image, or to produce the
% "negative" of an image.
%
% The format of the EvaluateImage method is:
%
% MagickBooleanType EvaluateImage(Image *image,
% const MagickEvaluateOperator op,const double value,
% ExceptionInfo *exception)
% MagickBooleanType EvaluateImages(Image *images,
% const MagickEvaluateOperator op,const double value,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o op: A channel op.
%
% o value: the operand value used with the operator.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Per-pixel working buffer: one double accumulator for every possible
  pixel channel, used by the evaluate methods below.
*/
typedef struct _PixelChannels
{
  double
    channel[MaxPixelChannels];
} PixelChannels;
/*
  Release the per-thread pixel buffers created by AcquirePixelThreadSet()
  and the buffer table itself.  Returns NULL so callers can reassign the
  pointer in one statement.  Partially-populated tables (NULL slots) are
  handled, which makes this safe to call from the acquire error path.
*/
static PixelChannels **DestroyPixelThreadSet(const Image *images,
  PixelChannels **pixels)
{
  register ssize_t
    n;

  size_t
    rows;

  assert(pixels != (PixelChannels **) NULL);
  rows=MagickMax(GetImageListLength(images),(size_t)
    GetMagickResourceLimit(ThreadResource));
  for (n=0; n < (ssize_t) rows; n++)
  {
    if (pixels[n] == (PixelChannels *) NULL)
      continue;
    pixels[n]=(PixelChannels *) RelinquishMagickMemory(pixels[n]);
  }
  pixels=(PixelChannels **) RelinquishMagickMemory(pixels);
  return(pixels);
}
static PixelChannels **AcquirePixelThreadSet(const Image *images)
{
const Image
*next;
PixelChannels
**pixels;
register ssize_t
i;
size_t
columns,
number_images,
rows;
number_images=GetImageListLength(images);
rows=MagickMax(number_images,(size_t) GetMagickResourceLimit(ThreadResource));
pixels=(PixelChannels **) AcquireQuantumMemory(rows,sizeof(*pixels));
if (pixels == (PixelChannels **) NULL)
return((PixelChannels **) NULL);
(void) memset(pixels,0,rows*sizeof(*pixels));
columns=MagickMax(number_images,MaxPixelChannels);
for (next=images; next != (Image *) NULL; next=next->next)
columns=MagickMax(next->columns,columns);
for (i=0; i < (ssize_t) rows; i++)
{
register ssize_t
j;
pixels[i]=(PixelChannels *) AcquireQuantumMemory(columns,sizeof(**pixels));
if (pixels[i] == (PixelChannels *) NULL)
return(DestroyPixelThreadSet(images,pixels));
for (j=0; j < (ssize_t) columns; j++)
{
register ssize_t
k;
for (k=0; k < MaxPixelChannels; k++)
pixels[i][j].channel[k]=0.0;
}
}
return(pixels);
}
/* Return the larger of the two operands. */
static inline double EvaluateMax(const double x,const double y)
{
  return(x > y ? x : y);
}
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
/*
  qsort() comparator: orders two PixelChannels records by the signed sum
  of their per-channel differences (an intensity-like total), so the
  median record can be picked from the sorted array.
*/
static int IntensityCompare(const void *x,const void *y)
{
  const PixelChannels
    *p,
    *q;

  double
    delta;

  register ssize_t
    n;

  p=(const PixelChannels *) x;
  q=(const PixelChannels *) y;
  delta=0.0;
  for (n=0; n < MaxPixelChannels; n++)
    delta+=p->channel[n]-(double) q->channel[n];
  if (delta < 0.0)
    return(-1);
  if (delta > 0.0)
    return(1);
  return(0);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
  ApplyEvaluateOperator() maps a single quantum through the requested
  evaluate operator with scalar operand `value' and returns the raw
  (unclamped) double result; callers clamp with ClampToQuantum().  Noise
  operators draw from `random_info'.  Mean/Median/Sum return the plain
  accumulation (pixel+value); the caller performs any final averaging or
  selection.
*/
static double ApplyEvaluateOperator(RandomInfo *random_info,const Quantum pixel,
  const MagickEvaluateOperator op,const double value)
{
  double
    result;

  register ssize_t
    i;

  result=0.0;
  switch (op)
  {
    case UndefinedEvaluateOperator:
      break;
    case AbsEvaluateOperator:
    {
      result=(double) fabs((double) (pixel+value));
      break;
    }
    case AddEvaluateOperator:
    {
      result=(double) (pixel+value);
      break;
    }
    case AddModulusEvaluateOperator:
    {
      /*
        This returns a 'floored modulus' of the addition which is a positive
        result.  It differs from % or fmod() that returns a 'truncated modulus'
        result, where floor() is replaced by trunc() and could return a
        negative result (which is clipped).
      */
      result=pixel+value;
      result-=(QuantumRange+1.0)*floor((double) result/(QuantumRange+1.0));
      break;
    }
    case AndEvaluateOperator:
    {
      /* value is rounded to the nearest integer before the bitwise op. */
      result=(double) ((ssize_t) pixel & (ssize_t) (value+0.5));
      break;
    }
    case CosineEvaluateOperator:
    {
      result=(double) (QuantumRange*(0.5*cos((double) (2.0*MagickPI*
        QuantumScale*pixel*value))+0.5));
      break;
    }
    case DivideEvaluateOperator:
    {
      /* Divide-by-zero is treated as division by one. */
      result=pixel/(value == 0.0 ? 1.0 : value);
      break;
    }
    case ExponentialEvaluateOperator:
    {
      result=(double) (QuantumRange*exp((double) (value*QuantumScale*pixel)));
      break;
    }
    case GaussianNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,GaussianNoise,
        value);
      break;
    }
    case ImpulseNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,ImpulseNoise,
        value);
      break;
    }
    case InverseLogEvaluateOperator:
    {
      result=(QuantumRange*pow((value+1.0),QuantumScale*pixel)-1.0)*
        PerceptibleReciprocal(value);
      break;
    }
    case LaplacianNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,
        LaplacianNoise,value);
      break;
    }
    case LeftShiftEvaluateOperator:
    {
      /* Shift by repeated doubling so the result stays in double range. */
      result=(double) pixel;
      for (i=0; i < (ssize_t) value; i++)
        result*=2.0;
      break;
    }
    case LogEvaluateOperator:
    {
      /* Guard: leave result at 0.0 for (near-)black input. */
      if ((QuantumScale*pixel) >= MagickEpsilon)
        result=(double) (QuantumRange*log((double) (QuantumScale*value*pixel+
          1.0))/log((double) (value+1.0)));
      break;
    }
    case MaxEvaluateOperator:
    {
      result=(double) EvaluateMax((double) pixel,value);
      break;
    }
    case MeanEvaluateOperator:
    {
      /* Accumulate only; the caller divides by the sample count. */
      result=(double) (pixel+value);
      break;
    }
    case MedianEvaluateOperator:
    {
      /* Accumulate only; the caller selects the median by sorting. */
      result=(double) (pixel+value);
      break;
    }
    case MinEvaluateOperator:
    {
      result=(double) MagickMin((double) pixel,value);
      break;
    }
    case MultiplicativeNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,
        MultiplicativeGaussianNoise,value);
      break;
    }
    case MultiplyEvaluateOperator:
    {
      result=(double) (value*pixel);
      break;
    }
    case OrEvaluateOperator:
    {
      result=(double) ((ssize_t) pixel | (ssize_t) (value+0.5));
      break;
    }
    case PoissonNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,PoissonNoise,
        value);
      break;
    }
    case PowEvaluateOperator:
    {
      /* Preserve sign for negative input (HDRI pixels can be negative). */
      if (pixel < 0)
        result=(double) -(QuantumRange*pow((double) -(QuantumScale*pixel),
          (double) value));
      else
        result=(double) (QuantumRange*pow((double) (QuantumScale*pixel),
          (double) value));
      break;
    }
    case RightShiftEvaluateOperator:
    {
      result=(double) pixel;
      for (i=0; i < (ssize_t) value; i++)
        result/=2.0;
      break;
    }
    case RootMeanSquareEvaluateOperator:
    {
      /* Accumulate squares; the caller takes sqrt(mean). */
      result=((double) pixel*pixel+value);
      break;
    }
    case SetEvaluateOperator:
    {
      result=value;
      break;
    }
    case SineEvaluateOperator:
    {
      result=(double) (QuantumRange*(0.5*sin((double) (2.0*MagickPI*
        QuantumScale*pixel*value))+0.5));
      break;
    }
    case SubtractEvaluateOperator:
    {
      result=(double) (pixel-value);
      break;
    }
    case SumEvaluateOperator:
    {
      result=(double) (pixel+value);
      break;
    }
    case ThresholdEvaluateOperator:
    {
      result=(double) (((double) pixel <= value) ? 0 : QuantumRange);
      break;
    }
    case ThresholdBlackEvaluateOperator:
    {
      result=(double) (((double) pixel <= value) ? 0 : pixel);
      break;
    }
    case ThresholdWhiteEvaluateOperator:
    {
      result=(double) (((double) pixel > value) ? QuantumRange : pixel);
      break;
    }
    case UniformNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,UniformNoise,
        value);
      break;
    }
    case XorEvaluateOperator:
    {
      result=(double) ((ssize_t) pixel ^ (ssize_t) (value+0.5));
      break;
    }
  }
  return(result);
}
/*
  Clone a destination canvas for an image-list operation: sized to the
  largest columns/rows in the list and cloned from the list member with
  the most channels, so no source image loses data.
*/
static Image *AcquireImageCanvas(const Image *images,ExceptionInfo *exception)
{
  const Image
    *canvas,
    *next;

  size_t
    columns,
    rows;

  canvas=images;
  columns=images->columns;
  rows=images->rows;
  for (next=images; next != (Image *) NULL; next=next->next)
  {
    if (next->number_channels > canvas->number_channels)
      canvas=next;
    columns=MagickMax(next->columns,columns);
    rows=MagickMax(next->rows,rows);
  }
  return(CloneImage(canvas,columns,rows,MagickTrue,exception));
}
/*
  EvaluateImages() applies the evaluate operator across every image in the
  list, producing one result image sized to the largest member of the list.
  The median operator sorts the per-image samples per pixel; all other
  operators fold the images together (the first image is always Add-ed in
  to seed the accumulator) and then apply any final normalization (mean,
  multiply rescale, RMS).  Returns NULL on failure.

  Fix in this revision: both row loops leaked the per-row `p' pointer array
  on the cache-view/queue failure path (`continue' without releasing it).
*/
MagickExport Image *EvaluateImages(const Image *images,
  const MagickEvaluateOperator op,ExceptionInfo *exception)
{
#define EvaluateImageTag  "Evaluate/Image"

  CacheView
    *evaluate_view,
    **image_view;

  const Image
    *next;

  Image
    *image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelChannels
    **magick_restrict evaluate_pixels;

  RandomInfo
    **magick_restrict random_info;

  size_t
    number_images;

  ssize_t
    j,
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImageCanvas(images,exception);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    {
      image=DestroyImage(image);
      return((Image *) NULL);
    }
  number_images=GetImageListLength(images);
  evaluate_pixels=AcquirePixelThreadSet(images);
  if (evaluate_pixels == (PixelChannels **) NULL)
    {
      image=DestroyImage(image);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return((Image *) NULL);
    }
  image_view=(CacheView **) AcquireQuantumMemory(number_images,
    sizeof(*image_view));
  if (image_view == (CacheView **) NULL)
    {
      image=DestroyImage(image);
      evaluate_pixels=DestroyPixelThreadSet(images,evaluate_pixels);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return(image);
    }
  /*
    One virtual cache view per source image.
  */
  next=images;
  for (j=0; j < (ssize_t) number_images; j++)
  {
    image_view[j]=AcquireVirtualCacheView(next,exception);
    next=GetNextImageInList(next);
  }
  /*
    Evaluate image pixels.
  */
  status=MagickTrue;
  progress=0;
  random_info=AcquireRandomInfoThreadSet();
  evaluate_view=AcquireAuthenticCacheView(image,exception);
  if (op == MedianEvaluateOperator)
    {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      key=GetRandomSecretKey(random_info[0]);
      #pragma omp parallel for schedule(static) shared(progress,status) \
        magick_number_threads(image,images,image->rows,key == ~0UL)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        const Image
          *next;

        const int
          id = GetOpenMPThreadId();

        const Quantum
          **p;

        register PixelChannels
          *evaluate_pixel;

        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        ssize_t
          j;

        if (status == MagickFalse)
          continue;
        p=(const Quantum **) AcquireQuantumMemory(number_images,sizeof(*p));
        if (p == (const Quantum **) NULL)
          {
            status=MagickFalse;
            (void) ThrowMagickException(exception,GetMagickModule(),
              ResourceLimitError,"MemoryAllocationFailed","`%s'",
              images->filename);
            continue;
          }
        for (j=0; j < (ssize_t) number_images; j++)
        {
          p[j]=GetCacheViewVirtualPixels(image_view[j],0,y,image->columns,1,
            exception);
          if (p[j] == (const Quantum *) NULL)
            break;
        }
        q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,image->columns,1,
          exception);
        if ((j < (ssize_t) number_images) || (q == (Quantum *) NULL))
          {
            /* Fix: release the row pointer array before bailing out. */
            p=(const Quantum **) RelinquishMagickMemory(p);
            status=MagickFalse;
            continue;
          }
        evaluate_pixel=evaluate_pixels[id];
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          register ssize_t
            i;

          /*
            Collect one sample per image, then select the median.
          */
          next=images;
          for (j=0; j < (ssize_t) number_images; j++)
          {
            for (i=0; i < MaxPixelChannels; i++)
              evaluate_pixel[j].channel[i]=0.0;
            for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
            {
              PixelChannel channel = GetPixelChannelChannel(image,i);
              PixelTrait traits = GetPixelChannelTraits(next,channel);
              PixelTrait evaluate_traits = GetPixelChannelTraits(image,channel);
              if ((traits == UndefinedPixelTrait) ||
                  (evaluate_traits == UndefinedPixelTrait) ||
                  ((traits & UpdatePixelTrait) == 0))
                continue;
              evaluate_pixel[j].channel[i]=ApplyEvaluateOperator(
                random_info[id],GetPixelChannel(next,channel,p[j]),op,
                evaluate_pixel[j].channel[i]);
            }
            p[j]+=GetPixelChannels(next);
            next=GetNextImageInList(next);
          }
          qsort((void *) evaluate_pixel,number_images,sizeof(*evaluate_pixel),
            IntensityCompare);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if ((traits == UndefinedPixelTrait) ||
                ((traits & UpdatePixelTrait) == 0))
              continue;
            q[i]=ClampToQuantum(evaluate_pixel[number_images/2].channel[i]);
          }
          q+=GetPixelChannels(image);
        }
        p=(const Quantum **) RelinquishMagickMemory(p);
        if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse)
          status=MagickFalse;
        if (images->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp atomic
#endif
            progress++;
            proceed=SetImageProgress(images,EvaluateImageTag,progress,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
    }
  else
    {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      key=GetRandomSecretKey(random_info[0]);
      #pragma omp parallel for schedule(static) shared(progress,status) \
        magick_number_threads(image,images,image->rows,key == ~0UL)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        const Image
          *next;

        const int
          id = GetOpenMPThreadId();

        const Quantum
          **p;

        register ssize_t
          i,
          x;

        register PixelChannels
          *evaluate_pixel;

        register Quantum
          *magick_restrict q;

        ssize_t
          j;

        if (status == MagickFalse)
          continue;
        p=(const Quantum **) AcquireQuantumMemory(number_images,sizeof(*p));
        if (p == (const Quantum **) NULL)
          {
            status=MagickFalse;
            (void) ThrowMagickException(exception,GetMagickModule(),
              ResourceLimitError,"MemoryAllocationFailed","`%s'",
              images->filename);
            continue;
          }
        for (j=0; j < (ssize_t) number_images; j++)
        {
          p[j]=GetCacheViewVirtualPixels(image_view[j],0,y,image->columns,1,
            exception);
          if (p[j] == (const Quantum *) NULL)
            break;
        }
        q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,image->columns,1,
          exception);
        if ((j < (ssize_t) number_images) || (q == (Quantum *) NULL))
          {
            /* Fix: release the row pointer array before bailing out. */
            p=(const Quantum **) RelinquishMagickMemory(p);
            status=MagickFalse;
            continue;
          }
        evaluate_pixel=evaluate_pixels[id];
        for (j=0; j < (ssize_t) image->columns; j++)
          for (i=0; i < MaxPixelChannels; i++)
            evaluate_pixel[j].channel[i]=0.0;
        /*
          Fold each image into the accumulator; the first image is always
          added so the accumulator starts from its pixel values.
        */
        next=images;
        for (j=0; j < (ssize_t) number_images; j++)
        {
          for (x=0; x < (ssize_t) image->columns; x++)
          {
            register ssize_t
              i;

            for (i=0; i < (ssize_t) GetPixelChannels(next); i++)
            {
              PixelChannel channel = GetPixelChannelChannel(image,i);
              PixelTrait traits = GetPixelChannelTraits(next,channel);
              PixelTrait evaluate_traits = GetPixelChannelTraits(image,channel);
              if ((traits == UndefinedPixelTrait) ||
                  (evaluate_traits == UndefinedPixelTrait))
                continue;
              if ((traits & UpdatePixelTrait) == 0)
                continue;
              evaluate_pixel[x].channel[i]=ApplyEvaluateOperator(
                random_info[id],GetPixelChannel(next,channel,p[j]),j == 0 ?
                AddEvaluateOperator : op,evaluate_pixel[x].channel[i]);
            }
            p[j]+=GetPixelChannels(next);
          }
          next=GetNextImageInList(next);
        }
        /*
          Final normalization for the accumulating operators.
        */
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          switch (op)
          {
            case MeanEvaluateOperator:
            {
              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
                evaluate_pixel[x].channel[i]/=(double) number_images;
              break;
            }
            case MultiplyEvaluateOperator:
            {
              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
              {
                register ssize_t
                  j;

                for (j=0; j < (ssize_t) (number_images-1); j++)
                  evaluate_pixel[x].channel[i]*=QuantumScale;
              }
              break;
            }
            case RootMeanSquareEvaluateOperator:
            {
              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
                evaluate_pixel[x].channel[i]=sqrt(evaluate_pixel[x].channel[i]/
                  number_images);
              break;
            }
            default:
              break;
          }
        }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if ((traits == UndefinedPixelTrait) ||
                ((traits & UpdatePixelTrait) == 0))
              continue;
            q[i]=ClampToQuantum(evaluate_pixel[x].channel[i]);
          }
          q+=GetPixelChannels(image);
        }
        p=(const Quantum **) RelinquishMagickMemory(p);
        if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse)
          status=MagickFalse;
        if (images->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp atomic
#endif
            progress++;
            proceed=SetImageProgress(images,EvaluateImageTag,progress,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
    }
  for (j=0; j < (ssize_t) number_images; j++)
    image_view[j]=DestroyCacheView(image_view[j]);
  image_view=(CacheView **) RelinquishMagickMemory(image_view);
  evaluate_view=DestroyCacheView(evaluate_view);
  evaluate_pixels=DestroyPixelThreadSet(images,evaluate_pixels);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}
/*
  EvaluateImage() applies the given arithmetic, relational, or logical
  operator, together with the constant 'value', to every updatable channel
  of each pixel in 'image'.  Returns MagickTrue on success; MagickFalse when
  the image cannot be made DirectClass or a pixel row cannot be accessed.
*/
MagickExport MagickBooleanType EvaluateImage(Image *image,
  const MagickEvaluateOperator op,const double value,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RandomInfo
    **magick_restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Pixels are modified in place, so the image must be DirectClass.
  */
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  /*
    One RandomInfo per thread: random state is passed to the operator helper
    and must not be shared across OpenMP threads.
  */
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /*
    key != ~0UL (a random secret key is set) disables multithreading so the
    random sequence stays reproducible — TODO confirm against
    GetRandomSecretKey() semantics.
  */
  key=GetRandomSecretKey(random_info[0]);
#pragma omp parallel for schedule(static) shared(progress,status) \
  magick_number_threads(image,image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        result;

      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        /*
          Skip channels that do not participate in updates.
        */
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & CopyPixelTrait) != 0)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        result=ApplyEvaluateOperator(random_info[id],q[i],op,value);
        /*
          Mean is halved here; presumably ApplyEvaluateOperator() returns the
          sum of pixel and operand for this operator — verify in the helper.
        */
        if (op == MeanEvaluateOperator)
          result/=2.0;
        q[i]=ClampToQuantum(result);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,EvaluateImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F u n c t i o n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FunctionImage() applies a value to the image with an arithmetic, relational,
% or logical operator to an image. Use these operations to lighten or darken
% an image, to increase or decrease contrast in an image, or to produce the
% "negative" of an image.
%
% The format of the FunctionImage method is:
%
% MagickBooleanType FunctionImage(Image *image,
% const MagickFunction function,const ssize_t number_parameters,
% const double *parameters,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o function: A channel function.
%
% o parameters: one or more parameters.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ApplyFunction() evaluates the requested channel function (polynomial,
  sinusoid, arcsin, or arctan) on a single quantum sample and returns the
  result clamped to the quantum range.  Missing parameters fall back to the
  defaults noted in each case.
*/
static Quantum ApplyFunction(Quantum pixel,const MagickFunction function,
  const size_t number_parameters,const double *parameters,
  ExceptionInfo *exception)
{
  double
    result;

  register ssize_t
    i;

  (void) exception;  /* unused; kept for call-signature symmetry */
  result=0.0;
  switch (function)
  {
    case PolynomialFunction:
    {
      /*
        Polynomial: polynomial constants, highest to lowest order (e.g. c0*x^3+
        c1*x^2+c2*x+c3), evaluated Horner-style on the normalized pixel.
      */
      result=0.0;
      for (i=0; i < (ssize_t) number_parameters; i++)
        result=result*QuantumScale*pixel+parameters[i];
      result*=QuantumRange;
      break;
    }
    case SinusoidFunction:
    {
      double
        amplitude,
        bias,
        frequency,
        phase;

      /*
        Sinusoid: frequency, phase, amplitude, bias.
      */
      frequency=(number_parameters >= 1) ? parameters[0] : 1.0;
      phase=(number_parameters >= 2) ? parameters[1] : 0.0;
      amplitude=(number_parameters >= 3) ? parameters[2] : 0.5;
      bias=(number_parameters >= 4) ? parameters[3] : 0.5;
      /* phase is given in degrees, hence the /360.0 inside the sine. */
      result=(double) (QuantumRange*(amplitude*sin((double) (2.0*
        MagickPI*(frequency*QuantumScale*pixel+phase/360.0)))+bias));
      break;
    }
    case ArcsinFunction:
    {
      double
        bias,
        center,
        range,
        width;

      /*
        Arcsin (pegged at range limits for invalid results): width, center,
        range, and bias.
      */
      width=(number_parameters >= 1) ? parameters[0] : 1.0;
      center=(number_parameters >= 2) ? parameters[1] : 0.5;
      range=(number_parameters >= 3) ? parameters[2] : 1.0;
      bias=(number_parameters >= 4) ? parameters[3] : 0.5;
      result=2.0/width*(QuantumScale*pixel-center);
      /* Outside asin()'s domain [-1,1], peg to the range limits. */
      if ( result <= -1.0 )
        result=bias-range/2.0;
      else
        if (result >= 1.0)
          result=bias+range/2.0;
        else
          result=(double) (range/MagickPI*asin((double) result)+bias);
      result*=QuantumRange;
      break;
    }
    case ArctanFunction:
    {
      double
        center,
        bias,
        range,
        slope;

      /*
        Arctan: slope, center, range, and bias.
      */
      slope=(number_parameters >= 1) ? parameters[0] : 1.0;
      center=(number_parameters >= 2) ? parameters[1] : 0.5;
      range=(number_parameters >= 3) ? parameters[2] : 1.0;
      bias=(number_parameters >= 4) ? parameters[3] : 0.5;
      result=(double) (MagickPI*slope*(QuantumScale*pixel-center));
      result=(double) (QuantumRange*(range/MagickPI*atan((double)
        result)+bias));
      break;
    }
    case UndefinedFunction:
      break;
  }
  return(ClampToQuantum(result));
}
/*
  FunctionImage() applies ApplyFunction() with the given function and
  parameters to every updatable channel of each pixel.  Delegates to an
  OpenCL-accelerated path when available; otherwise processes rows in
  parallel with OpenMP.
*/
MagickExport MagickBooleanType FunctionImage(Image *image,
  const MagickFunction function,const size_t number_parameters,
  const double *parameters,ExceptionInfo *exception)
{
#define FunctionImageTag "Function/Image "

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /*
    Prefer the OpenCL implementation when it succeeds.
  */
  if (AccelerateFunctionImage(image,function,number_parameters,parameters,
    exception) != MagickFalse)
    return(MagickTrue);
#endif
  /*
    Pixels are modified in place, so the image must be DirectClass.
  */
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        /*
          Only channels flagged for update participate.
        */
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ApplyFunction(q[i],function,number_parameters,parameters,
          exception);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,FunctionImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e E n t r o p y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageEntropy() returns the entropy of one or more image channels.
%
% The format of the GetImageEntropy method is:
%
% MagickBooleanType GetImageEntropy(const Image *image,double *entropy,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o entropy: the average entropy of the selected channels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageEntropy(const Image *image,
  double *entropy,ExceptionInfo *exception)
{
  ChannelStatistics
    *statistics;

  /*
    Compute the per-channel statistics and report the composite entropy.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  statistics=GetImageStatistics(image,exception);
  if (statistics == (ChannelStatistics *) NULL)
    return(MagickFalse);
  *entropy=statistics[CompositePixelChannel].entropy;
  statistics=(ChannelStatistics *) RelinquishMagickMemory(statistics);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e E x t r e m a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageExtrema() returns the extrema of one or more image channels.
%
% The format of the GetImageExtrema method is:
%
% MagickBooleanType GetImageExtrema(const Image *image,size_t *minima,
% size_t *maxima,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o minima: the minimum value in the channel.
%
% o maxima: the maximum value in the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageExtrema(const Image *image,
  size_t *minima,size_t *maxima,ExceptionInfo *exception)
{
  double
    maximum,
    minimum;

  MagickBooleanType
    status;

  /*
    Report the channel range rounded to the nearest integral quantum.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=GetImageRange(image,&minimum,&maximum,exception);
  *minima=(size_t) ceil(minimum-0.5);
  *maxima=(size_t) floor(maximum+0.5);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e K u r t o s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageKurtosis() returns the kurtosis and skewness of one or more image
% channels.
%
% The format of the GetImageKurtosis method is:
%
% MagickBooleanType GetImageKurtosis(const Image *image,double *kurtosis,
% double *skewness,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o kurtosis: the kurtosis of the channel.
%
% o skewness: the skewness of the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageKurtosis(const Image *image,
  double *kurtosis,double *skewness,ExceptionInfo *exception)
{
  ChannelStatistics
    *statistics;

  /*
    Derive kurtosis and skewness from the composite channel statistics.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  statistics=GetImageStatistics(image,exception);
  if (statistics == (ChannelStatistics *) NULL)
    return(MagickFalse);
  *kurtosis=statistics[CompositePixelChannel].kurtosis;
  *skewness=statistics[CompositePixelChannel].skewness;
  statistics=(ChannelStatistics *) RelinquishMagickMemory(statistics);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e M e a n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageMean() returns the mean and standard deviation of one or more image
% channels.
%
% The format of the GetImageMean method is:
%
% MagickBooleanType GetImageMean(const Image *image,double *mean,
% double *standard_deviation,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o mean: the average value in the channel.
%
% o standard_deviation: the standard deviation of the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageMean(const Image *image,double *mean,
  double *standard_deviation,ExceptionInfo *exception)
{
  ChannelStatistics
    *statistics;

  /*
    Derive the mean and standard deviation from the composite channel
    statistics.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  statistics=GetImageStatistics(image,exception);
  if (statistics == (ChannelStatistics *) NULL)
    return(MagickFalse);
  *mean=statistics[CompositePixelChannel].mean;
  *standard_deviation=statistics[CompositePixelChannel].standard_deviation;
  statistics=(ChannelStatistics *) RelinquishMagickMemory(statistics);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e M e d i a n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageMedian() returns the median pixel of one or more image channels.
%
% The format of the GetImageMedian method is:
%
% MagickBooleanType GetImageMedian(const Image *image,double *median,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
%    o median: the median pixel value of the selected channels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageMedian(const Image *image,double *median,
  ExceptionInfo *exception)
{
  ChannelStatistics
    *statistics;

  /*
    Report the median of the composite channel statistics.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  statistics=GetImageStatistics(image,exception);
  if (statistics == (ChannelStatistics *) NULL)
    return(MagickFalse);
  *median=statistics[CompositePixelChannel].median;
  statistics=(ChannelStatistics *) RelinquishMagickMemory(statistics);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e M o m e n t s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageMoments() returns the normalized moments of one or more image
% channels.
%
% The format of the GetImageMoments method is:
%
% ChannelMoments *GetImageMoments(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static size_t GetImageChannels(const Image *image)
{
register ssize_t
i;
size_t
channels;
channels=0;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
channels++;
}
return((size_t) (channels == 0 ? 1 : channels));
}
/*
  GetImageMoments() computes, for every channel and for the composite
  channel, the centroid, the ellipse (angle, axes, eccentricity, intensity)
  of the intensity distribution, and the eight Hu invariant moments.
  Returns NULL on allocation failure or when a pixel row cannot be read.

  Fix: the ellipse-angle quadrant correction compared fabs() results
  against 0.0 ('fabs(x) < 0.0' is always false and 'fabs(x) >= 0.0' is
  always true), which made the near-zero-M11 branch unreachable; compare
  against MagickEpsilon instead.
*/
MagickExport ChannelMoments *GetImageMoments(const Image *image,
  ExceptionInfo *exception)
{
#define MaxNumberImageMoments 8

  CacheView
    *image_view;

  ChannelMoments
    *channel_moments;

  double
    M00[MaxPixelChannels+1],
    M01[MaxPixelChannels+1],
    M02[MaxPixelChannels+1],
    M03[MaxPixelChannels+1],
    M10[MaxPixelChannels+1],
    M11[MaxPixelChannels+1],
    M12[MaxPixelChannels+1],
    M20[MaxPixelChannels+1],
    M21[MaxPixelChannels+1],
    M22[MaxPixelChannels+1],
    M30[MaxPixelChannels+1];

  PointInfo
    centroid[MaxPixelChannels+1];

  ssize_t
    channel,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  channel_moments=(ChannelMoments *) AcquireQuantumMemory(MaxPixelChannels+1,
    sizeof(*channel_moments));
  if (channel_moments == (ChannelMoments *) NULL)
    return(channel_moments);
  (void) memset(channel_moments,0,(MaxPixelChannels+1)*
    sizeof(*channel_moments));
  (void) memset(centroid,0,sizeof(centroid));
  (void) memset(M00,0,sizeof(M00));
  (void) memset(M01,0,sizeof(M01));
  (void) memset(M02,0,sizeof(M02));
  (void) memset(M03,0,sizeof(M03));
  (void) memset(M10,0,sizeof(M10));
  (void) memset(M11,0,sizeof(M11));
  (void) memset(M12,0,sizeof(M12));
  (void) memset(M20,0,sizeof(M20));
  (void) memset(M21,0,sizeof(M21));
  (void) memset(M22,0,sizeof(M22));
  (void) memset(M30,0,sizeof(M30));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    /*
      Compute center of mass (centroid).
    */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        M00[channel]+=QuantumScale*p[i];
        M00[MaxPixelChannels]+=QuantumScale*p[i];
        M10[channel]+=x*QuantumScale*p[i];
        M10[MaxPixelChannels]+=x*QuantumScale*p[i];
        M01[channel]+=y*QuantumScale*p[i];
        M01[MaxPixelChannels]+=y*QuantumScale*p[i];
      }
      p+=GetPixelChannels(image);
    }
  }
  for (channel=0; channel <= MaxPixelChannels; channel++)
  {
    /*
      Compute center of mass (centroid).
    */
    centroid[channel].x=M10[channel]*PerceptibleReciprocal(M00[channel]);
    centroid[channel].y=M01[channel]*PerceptibleReciprocal(M00[channel]);
  }
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    /*
      Compute the image moments.
    */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        M11[channel]+=(x-centroid[channel].x)*(y-centroid[channel].y)*
          QuantumScale*p[i];
        M11[MaxPixelChannels]+=(x-centroid[channel].x)*(y-centroid[channel].y)*
          QuantumScale*p[i];
        M20[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
          QuantumScale*p[i];
        M20[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
          QuantumScale*p[i];
        M02[channel]+=(y-centroid[channel].y)*(y-centroid[channel].y)*
          QuantumScale*p[i];
        M02[MaxPixelChannels]+=(y-centroid[channel].y)*(y-centroid[channel].y)*
          QuantumScale*p[i];
        M21[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
          (y-centroid[channel].y)*QuantumScale*p[i];
        M21[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
          (y-centroid[channel].y)*QuantumScale*p[i];
        M12[channel]+=(x-centroid[channel].x)*(y-centroid[channel].y)*
          (y-centroid[channel].y)*QuantumScale*p[i];
        M12[MaxPixelChannels]+=(x-centroid[channel].x)*(y-centroid[channel].y)*
          (y-centroid[channel].y)*QuantumScale*p[i];
        M22[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
          (y-centroid[channel].y)*(y-centroid[channel].y)*QuantumScale*p[i];
        M22[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
          (y-centroid[channel].y)*(y-centroid[channel].y)*QuantumScale*p[i];
        M30[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
          (x-centroid[channel].x)*QuantumScale*p[i];
        M30[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
          (x-centroid[channel].x)*QuantumScale*p[i];
        M03[channel]+=(y-centroid[channel].y)*(y-centroid[channel].y)*
          (y-centroid[channel].y)*QuantumScale*p[i];
        M03[MaxPixelChannels]+=(y-centroid[channel].y)*(y-centroid[channel].y)*
          (y-centroid[channel].y)*QuantumScale*p[i];
      }
      p+=GetPixelChannels(image);
    }
  }
  /*
    The composite slot accumulated every channel; average it.
  */
  M00[MaxPixelChannels]/=GetImageChannels(image);
  M01[MaxPixelChannels]/=GetImageChannels(image);
  M02[MaxPixelChannels]/=GetImageChannels(image);
  M03[MaxPixelChannels]/=GetImageChannels(image);
  M10[MaxPixelChannels]/=GetImageChannels(image);
  M11[MaxPixelChannels]/=GetImageChannels(image);
  M12[MaxPixelChannels]/=GetImageChannels(image);
  M20[MaxPixelChannels]/=GetImageChannels(image);
  M21[MaxPixelChannels]/=GetImageChannels(image);
  M22[MaxPixelChannels]/=GetImageChannels(image);
  M30[MaxPixelChannels]/=GetImageChannels(image);
  for (channel=0; channel <= MaxPixelChannels; channel++)
  {
    /*
      Compute elliptical angle, major and minor axes, eccentricity, & intensity.
    */
    channel_moments[channel].centroid=centroid[channel];
    channel_moments[channel].ellipse_axis.x=sqrt((2.0*
      PerceptibleReciprocal(M00[channel]))*((M20[channel]+M02[channel])+
      sqrt(4.0*M11[channel]*M11[channel]+(M20[channel]-M02[channel])*
      (M20[channel]-M02[channel]))));
    channel_moments[channel].ellipse_axis.y=sqrt((2.0*
      PerceptibleReciprocal(M00[channel]))*((M20[channel]+M02[channel])-
      sqrt(4.0*M11[channel]*M11[channel]+(M20[channel]-M02[channel])*
      (M20[channel]-M02[channel]))));
    channel_moments[channel].ellipse_angle=RadiansToDegrees(1.0/2.0*atan(2.0*
      M11[channel]*PerceptibleReciprocal(M20[channel]-M02[channel])));
    /*
      Quadrant-correct the ellipse angle.  Compare against MagickEpsilon:
      'fabs(...) < 0.0' can never be true, so the first branch was dead code.
    */
    if (fabs(M11[channel]) < MagickEpsilon)
      {
        if ((fabs(M20[channel]-M02[channel]) >= MagickEpsilon) &&
            ((M20[channel]-M02[channel]) < 0.0))
          channel_moments[channel].ellipse_angle+=90.0;
      }
    else
      if (M11[channel] < 0.0)
        {
          if (fabs(M20[channel]-M02[channel]) >= MagickEpsilon)
            {
              if ((M20[channel]-M02[channel]) < 0.0)
                channel_moments[channel].ellipse_angle+=90.0;
              else
                channel_moments[channel].ellipse_angle+=180.0;
            }
        }
      else
        if ((fabs(M20[channel]-M02[channel]) >= MagickEpsilon) &&
            ((M20[channel]-M02[channel]) < 0.0))
          channel_moments[channel].ellipse_angle+=90.0;
    channel_moments[channel].ellipse_eccentricity=sqrt(1.0-(
      channel_moments[channel].ellipse_axis.y*
      channel_moments[channel].ellipse_axis.y*PerceptibleReciprocal(
      channel_moments[channel].ellipse_axis.x*
      channel_moments[channel].ellipse_axis.x)));
    channel_moments[channel].ellipse_intensity=M00[channel]*
      PerceptibleReciprocal(MagickPI*channel_moments[channel].ellipse_axis.x*
      channel_moments[channel].ellipse_axis.y+MagickEpsilon);
  }
  for (channel=0; channel <= MaxPixelChannels; channel++)
  {
    /*
      Normalize image moments: eta(p,q) = mu(p,q)/mu(0,0)^(1+(p+q)/2).
    */
    M10[channel]=0.0;
    M01[channel]=0.0;
    M11[channel]*=PerceptibleReciprocal(pow(M00[channel],1.0+(1.0+1.0)/2.0));
    M20[channel]*=PerceptibleReciprocal(pow(M00[channel],1.0+(2.0+0.0)/2.0));
    M02[channel]*=PerceptibleReciprocal(pow(M00[channel],1.0+(0.0+2.0)/2.0));
    M21[channel]*=PerceptibleReciprocal(pow(M00[channel],1.0+(2.0+1.0)/2.0));
    M12[channel]*=PerceptibleReciprocal(pow(M00[channel],1.0+(1.0+2.0)/2.0));
    M22[channel]*=PerceptibleReciprocal(pow(M00[channel],1.0+(2.0+2.0)/2.0));
    M30[channel]*=PerceptibleReciprocal(pow(M00[channel],1.0+(3.0+0.0)/2.0));
    M03[channel]*=PerceptibleReciprocal(pow(M00[channel],1.0+(0.0+3.0)/2.0));
    M00[channel]=1.0;
  }
  image_view=DestroyCacheView(image_view);
  for (channel=0; channel <= MaxPixelChannels; channel++)
  {
    /*
      Compute Hu invariant moments.
    */
    channel_moments[channel].invariant[0]=M20[channel]+M02[channel];
    channel_moments[channel].invariant[1]=(M20[channel]-M02[channel])*
      (M20[channel]-M02[channel])+4.0*M11[channel]*M11[channel];
    channel_moments[channel].invariant[2]=(M30[channel]-3.0*M12[channel])*
      (M30[channel]-3.0*M12[channel])+(3.0*M21[channel]-M03[channel])*
      (3.0*M21[channel]-M03[channel]);
    channel_moments[channel].invariant[3]=(M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])+(M21[channel]+M03[channel])*
      (M21[channel]+M03[channel]);
    channel_moments[channel].invariant[4]=(M30[channel]-3.0*M12[channel])*
      (M30[channel]+M12[channel])*((M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])-3.0*(M21[channel]+M03[channel])*
      (M21[channel]+M03[channel]))+(3.0*M21[channel]-M03[channel])*
      (M21[channel]+M03[channel])*(3.0*(M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])-(M21[channel]+M03[channel])*
      (M21[channel]+M03[channel]));
    channel_moments[channel].invariant[5]=(M20[channel]-M02[channel])*
      ((M30[channel]+M12[channel])*(M30[channel]+M12[channel])-
      (M21[channel]+M03[channel])*(M21[channel]+M03[channel]))+
      4.0*M11[channel]*(M30[channel]+M12[channel])*(M21[channel]+M03[channel]);
    channel_moments[channel].invariant[6]=(3.0*M21[channel]-M03[channel])*
      (M30[channel]+M12[channel])*((M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])-3.0*(M21[channel]+M03[channel])*
      (M21[channel]+M03[channel]))-(M30[channel]-3.0*M12[channel])*
      (M21[channel]+M03[channel])*(3.0*(M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])-(M21[channel]+M03[channel])*
      (M21[channel]+M03[channel]));
    channel_moments[channel].invariant[7]=M11[channel]*((M30[channel]+
      M12[channel])*(M30[channel]+M12[channel])-(M03[channel]+M21[channel])*
      (M03[channel]+M21[channel]))-(M20[channel]-M02[channel])*
      (M30[channel]+M12[channel])*(M03[channel]+M21[channel]);
  }
  /*
    A pixel-read failure broke out of a row loop early; fail with NULL.
  */
  if (y < (ssize_t) image->rows)
    channel_moments=(ChannelMoments *) RelinquishMagickMemory(channel_moments);
  return(channel_moments);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
%   G e t I m a g e P e r c e p t u a l H a s h                               %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePerceptualHash() returns the perceptual hash of one or more
% image channels.
%
% The format of the GetImagePerceptualHash method is:
%
% ChannelPerceptualHash *GetImagePerceptualHash(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  MagickLog10() returns log10 of the magnitude of x; magnitudes below
  Log10Epsilon are clamped to Log10Epsilon so the result is always finite.
*/
static inline double MagickLog10(const double x)
{
#define Log10Epsilon (1.0e-11)

  double
    magnitude;

  magnitude=fabs(x);
  if (magnitude < Log10Epsilon)
    magnitude=Log10Epsilon;
  return(log10(magnitude));
}
/*
  GetImagePerceptualHash() returns the perceptual hash of the image: for
  each requested colorspace (artifact "phash:colorspaces", default
  "sRGB,HCLp") the image is lightly blurred, transformed, and the negated
  log10 of the Hu invariant moments of every channel is recorded.
  Returns NULL only on allocation failure; a partially filled hash is
  returned when a colorspace cannot be processed.

  Fix: the blurred clone leaked when TransformImageColorspace() failed;
  it is now destroyed before bailing out.
*/
MagickExport ChannelPerceptualHash *GetImagePerceptualHash(const Image *image,
  ExceptionInfo *exception)
{
  ChannelPerceptualHash
    *perceptual_hash;

  char
    *colorspaces,
    *q;

  const char
    *artifact;

  MagickBooleanType
    status;

  register char
    *p;

  register ssize_t
    i;

  perceptual_hash=(ChannelPerceptualHash *) AcquireQuantumMemory(
    MaxPixelChannels+1UL,sizeof(*perceptual_hash));
  if (perceptual_hash == (ChannelPerceptualHash *) NULL)
    return((ChannelPerceptualHash *) NULL);
  artifact=GetImageArtifact(image,"phash:colorspaces");
  if (artifact != NULL)
    colorspaces=AcquireString(artifact);
  else
    colorspaces=AcquireString("sRGB,HCLp");
  perceptual_hash[0].number_colorspaces=0;
  perceptual_hash[0].number_channels=0;
  q=colorspaces;
  for (i=0; (p=StringToken(",",&q)) != (char *) NULL; i++)
  {
    ChannelMoments
      *moments;

    Image
      *hash_image;

    size_t
      j;

    ssize_t
      channel,
      colorspace;

    if (i >= MaximumNumberOfPerceptualColorspaces)
      break;
    colorspace=ParseCommandOption(MagickColorspaceOptions,MagickFalse,p);
    if (colorspace < 0)
      break;
    perceptual_hash[0].colorspace[i]=(ColorspaceType) colorspace;
    hash_image=BlurImage(image,0.0,1.0,exception);
    if (hash_image == (Image *) NULL)
      break;
    hash_image->depth=8;
    status=TransformImageColorspace(hash_image,(ColorspaceType) colorspace,
      exception);
    if (status == MagickFalse)
      {
        /*
          Destroy the blurred clone before bailing out; previously it
          leaked when the colorspace transform failed.
        */
        hash_image=DestroyImage(hash_image);
        break;
      }
    moments=GetImageMoments(hash_image,exception);
    perceptual_hash[0].number_colorspaces++;
    perceptual_hash[0].number_channels+=GetImageChannels(hash_image);
    hash_image=DestroyImage(hash_image);
    if (moments == (ChannelMoments *) NULL)
      break;
    for (channel=0; channel <= MaxPixelChannels; channel++)
      for (j=0; j < MaximumNumberOfImageMoments; j++)
        perceptual_hash[channel].phash[i][j]=
          (-MagickLog10(moments[channel].invariant[j]));
    moments=(ChannelMoments *) RelinquishMagickMemory(moments);
  }
  colorspaces=DestroyString(colorspaces);
  return(perceptual_hash);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e R a n g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageRange() returns the range of one or more image channels.
%
% The format of the GetImageRange method is:
%
% MagickBooleanType GetImageRange(const Image *image,double *minima,
% double *maxima,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o minima: the minimum value in the channel.
%
% o maxima: the maximum value in the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageRange() finds the smallest and largest quantum value among all
  updatable channels of the image.  Each OpenMP iteration computes private
  row extrema; rows are then merged into *minima/*maxima inside a critical
  section.  On a pixel-read failure MagickFalse is returned and the outputs
  may remain 0.0.
*/
MagickExport MagickBooleanType GetImageRange(const Image *image,double *minima,
  double *maxima,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    initialize,
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=MagickTrue;
  initialize=MagickTrue;
  *maxima=0.0;
  *minima=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status,initialize) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      row_maxima = 0.0,
      row_minima = 0.0;

    MagickBooleanType
      row_initialize;

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    row_initialize=MagickTrue;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        /*
          Only channels flagged for update participate.
        */
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if (row_initialize != MagickFalse)
          {
            /*
              Seed the row extrema with the first participating sample.
            */
            row_minima=(double) p[i];
            row_maxima=(double) p[i];
            row_initialize=MagickFalse;
          }
        else
          {
            if ((double) p[i] < row_minima)
              row_minima=(double) p[i];
            if ((double) p[i] > row_maxima)
              row_maxima=(double) p[i];
          }
      }
      p+=GetPixelChannels(image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetImageRange)
#endif
    {
      /*
        Merge this row's extrema into the global result.
      */
      if (initialize != MagickFalse)
        {
          *minima=row_minima;
          *maxima=row_maxima;
          initialize=MagickFalse;
        }
      else
        {
          if (row_minima < *minima)
            *minima=row_minima;
          if (row_maxima > *maxima)
            *maxima=row_maxima;
        }
    }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e S t a t i s t i c s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageStatistics() returns statistics for each channel in the image. The
% statistics include the channel depth, its minima, maxima, mean, standard
% deviation, kurtosis and skewness. You can access the red channel mean, for
% example, like this:
%
% channel_statistics=GetImageStatistics(image,exception);
% red_mean=channel_statistics[RedPixelChannel].mean;
%
% Use MagickRelinquishMemory() to free the statistics buffer.
%
% The format of the GetImageStatistics method is:
%
% ChannelStatistics *GetImageStatistics(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetMedianPixel() selects the median of the first n entries of 'pixels'
  using an iterative quickselect (median-of-three pivoting).  The buffer is
  partially reordered in place; the returned value is the fixed index
  (n-1)/2, at which the median value resides on return.
*/
static ssize_t GetMedianPixel(Quantum *pixels,const size_t n)
{
#define SwapPixels(alpha,beta) \
{ \
  register Quantum gamma=(alpha); \
  (alpha)=(beta);(beta)=gamma; \
}

  ssize_t
    low = 0,
    high = (ssize_t) n-1,
    median = (low+high)/2;

  for ( ; ; )
  {
    ssize_t
      l = low+1,
      h = high,
      mid = (low+high)/2;

    if (high <= low)
      return(median);
    if (high == (low+1))
      {
        /*
          Two elements: order them and we are done.
        */
        if (pixels[low] > pixels[high])
          SwapPixels(pixels[low],pixels[high]);
        return(median);
      }
    /*
      Median-of-three: order low/mid/high and park the pivot at low+1.
    */
    if (pixels[mid] > pixels[high])
      SwapPixels(pixels[mid],pixels[high]);
    if (pixels[low] > pixels[high])
      SwapPixels(pixels[low], pixels[high]);
    if (pixels[mid] > pixels[low])
      SwapPixels(pixels[mid],pixels[low]);
    SwapPixels(pixels[mid],pixels[low+1]);
    /*
      Partition around pixels[low]; l scans up, h scans down.
    */
    for ( ; ; )
    {
      do l++; while (pixels[low] > pixels[l]);
      do h--; while (pixels[h] > pixels[low]);
      if (h < l)
        break;
      SwapPixels(pixels[l],pixels[h]);
    }
    SwapPixels(pixels[low],pixels[h]);
    /*
      Narrow to the partition that contains the median index.
    */
    if (h <= median)
      low=l;
    if (h >= median)
      high=h-1;
  }
}
MagickExport ChannelStatistics *GetImageStatistics(const Image *image,
  ExceptionInfo *exception)
{
  ChannelStatistics
    *channel_statistics;

  double
    area,
    *histogram,
    standard_deviation;

  MagickStatusType
    status;

  MemoryInfo
    *median_info;

  Quantum
    *median;

  QuantumAny
    range;

  register ssize_t
    i;

  size_t
    depth;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Allocate the per-channel histogram (for entropy) and the statistics
    table: one entry per pixel channel plus the composite channel.
  */
  histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,GetPixelChannels(image)*
    sizeof(*histogram));
  channel_statistics=(ChannelStatistics *) AcquireQuantumMemory(
    MaxPixelChannels+1,sizeof(*channel_statistics));
  if ((channel_statistics == (ChannelStatistics *) NULL) ||
      (histogram == (double *) NULL))
    {
      if (histogram != (double *) NULL)
        histogram=(double *) RelinquishMagickMemory(histogram);
      if (channel_statistics != (ChannelStatistics *) NULL)
        channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
          channel_statistics);
      return(channel_statistics);
    }
  (void) memset(channel_statistics,0,(MaxPixelChannels+1)*
    sizeof(*channel_statistics));
  for (i=0; i <= (ssize_t) MaxPixelChannels; i++)
  {
    channel_statistics[i].depth=1;
    channel_statistics[i].maxima=(-MagickMaximumValue);
    channel_statistics[i].minima=MagickMaximumValue;
  }
  (void) memset(histogram,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*histogram));
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    /*
      Compute pixel statistics.
    */
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      if (GetPixelReadMask(image,p) <= (QuantumRange/2))
        {
          p+=GetPixelChannels(image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if (channel_statistics[channel].depth != MAGICKCORE_QUANTUM_DEPTH)
          {
            /*
              Grow the channel depth until this sample round-trips losslessly
              at that depth; retry the same sample (i--) after each bump.
            */
            depth=channel_statistics[channel].depth;
            range=GetQuantumRange(depth);
            status=p[i] != ScaleAnyToQuantum(ScaleQuantumToAny(p[i],range),
              range) ? MagickTrue : MagickFalse;
            if (status != MagickFalse)
              {
                channel_statistics[channel].depth++;
                i--;
                continue;
              }
          }
        /*
          Accumulate raw moments for this channel and the composite channel.
        */
        if ((double) p[i] < channel_statistics[channel].minima)
          channel_statistics[channel].minima=(double) p[i];
        if ((double) p[i] > channel_statistics[channel].maxima)
          channel_statistics[channel].maxima=(double) p[i];
        channel_statistics[channel].sum+=p[i];
        channel_statistics[channel].sum_squared+=(double) p[i]*p[i];
        channel_statistics[channel].sum_cubed+=(double) p[i]*p[i]*p[i];
        channel_statistics[channel].sum_fourth_power+=(double) p[i]*p[i]*p[i]*
          p[i];
        channel_statistics[channel].area++;
        if ((double) p[i] < channel_statistics[CompositePixelChannel].minima)
          channel_statistics[CompositePixelChannel].minima=(double) p[i];
        if ((double) p[i] > channel_statistics[CompositePixelChannel].maxima)
          channel_statistics[CompositePixelChannel].maxima=(double) p[i];
        histogram[GetPixelChannels(image)*ScaleQuantumToMap(
          ClampToQuantum((double) p[i]))+i]++;
        channel_statistics[CompositePixelChannel].sum+=(double) p[i];
        channel_statistics[CompositePixelChannel].sum_squared+=(double)
          p[i]*p[i];
        channel_statistics[CompositePixelChannel].sum_cubed+=(double)
          p[i]*p[i]*p[i];
        channel_statistics[CompositePixelChannel].sum_fourth_power+=(double)
          p[i]*p[i]*p[i]*p[i];
        channel_statistics[CompositePixelChannel].area++;
      }
      p+=GetPixelChannels(image);
    }
  }
  for (i=0; i <= (ssize_t) MaxPixelChannels; i++)
  {
    /*
      Normalize pixel statistics: convert raw sums to per-pixel moments,
      then derive the (sample) standard deviation.
    */
    area=PerceptibleReciprocal(channel_statistics[i].area);
    channel_statistics[i].sum*=area;
    channel_statistics[i].sum_squared*=area;
    channel_statistics[i].sum_cubed*=area;
    channel_statistics[i].sum_fourth_power*=area;
    channel_statistics[i].mean=channel_statistics[i].sum;
    channel_statistics[i].variance=channel_statistics[i].sum_squared;
    standard_deviation=sqrt(channel_statistics[i].variance-
      (channel_statistics[i].mean*channel_statistics[i].mean));
    standard_deviation=sqrt(PerceptibleReciprocal(channel_statistics[i].area-
      1.0)*channel_statistics[i].area*standard_deviation*standard_deviation);
    channel_statistics[i].standard_deviation=standard_deviation;
  }
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    double
      number_bins;

    register ssize_t
      j;

    /*
      Compute pixel entropy, normalized by the number of occupied
      histogram bins.
    */
    PixelChannel channel = GetPixelChannelChannel(image,i);
    number_bins=0.0;
    for (j=0; j <= (ssize_t) MaxMap; j++)
      if (histogram[GetPixelChannels(image)*j+i] > 0.0)
        number_bins++;
    area=PerceptibleReciprocal(channel_statistics[channel].area);
    for (j=0; j <= (ssize_t) MaxMap; j++)
    {
      double
        count;

      count=area*histogram[GetPixelChannels(image)*j+i];
      channel_statistics[channel].entropy+=-count*MagickLog10(count)*
        PerceptibleReciprocal(MagickLog10(number_bins));
      channel_statistics[CompositePixelChannel].entropy+=-count*
        MagickLog10(count)*PerceptibleReciprocal(MagickLog10(number_bins))/
        GetPixelChannels(image);
    }
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  for (i=0; i <= (ssize_t) MaxPixelChannels; i++)
  {
    /*
      Compute kurtosis & skewness statistics from the normalized moments.
    */
    standard_deviation=PerceptibleReciprocal(
      channel_statistics[i].standard_deviation);
    channel_statistics[i].skewness=(channel_statistics[i].sum_cubed-3.0*
      channel_statistics[i].mean*channel_statistics[i].sum_squared+2.0*
      channel_statistics[i].mean*channel_statistics[i].mean*
      channel_statistics[i].mean)*(standard_deviation*standard_deviation*
      standard_deviation);
    channel_statistics[i].kurtosis=(channel_statistics[i].sum_fourth_power-4.0*
      channel_statistics[i].mean*channel_statistics[i].sum_cubed+6.0*
      channel_statistics[i].mean*channel_statistics[i].mean*
      channel_statistics[i].sum_squared-3.0*channel_statistics[i].mean*
      channel_statistics[i].mean*1.0*channel_statistics[i].mean*
      channel_statistics[i].mean)*(standard_deviation*standard_deviation*
      standard_deviation*standard_deviation)-3.0;
  }
  median_info=AcquireVirtualMemory(image->columns,image->rows*sizeof(*median));
  if (median_info == (MemoryInfo *) NULL)
    (void) ThrowMagickException(exception,GetMagickModule(),
      ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
  else
    {
      ssize_t
        i;

      median=(Quantum *) GetVirtualMemoryBlob(median_info);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        size_t
          n = 0;

        /*
          Compute median statistics for each channel.
        */
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        for (y=0; y < (ssize_t) image->rows; y++)
        {
          register const Quantum
            *magick_restrict p;

          register ssize_t
            x;

          p=GetVirtualPixels(image,0,y,image->columns,1,exception);
          if (p == (const Quantum *) NULL)
            break;
          for (x=0; x < (ssize_t) image->columns; x++)
          {
            if (GetPixelReadMask(image,p) <= (QuantumRange/2))
              {
                p+=GetPixelChannels(image);
                continue;
              }
            median[n++]=p[i];
            /*
              BUG FIX: advance to the next pixel inside the column loop.
              Previously p was only advanced once per row, so every unmasked
              column re-sampled the same pixel and the median was computed
              over (mostly) the first pixel of each row.
            */
            p+=GetPixelChannels(image);
          }
        }
        /*
          Guard n == 0 (e.g. a fully-masked image): GetMedianPixel() would
          otherwise index uninitialized memory; the median stays 0.
        */
        if (n != 0)
          channel_statistics[channel].median=(double) median[
            GetMedianPixel(median,n)];
      }
      median_info=RelinquishVirtualMemory(median_info);
    }
  /*
    The composite channel reports the mean of the per-channel statistics.
  */
  channel_statistics[CompositePixelChannel].mean=0.0;
  channel_statistics[CompositePixelChannel].median=0.0;
  channel_statistics[CompositePixelChannel].standard_deviation=0.0;
  channel_statistics[CompositePixelChannel].entropy=0.0;
  for (i=0; i < (ssize_t) MaxPixelChannels; i++)
  {
    channel_statistics[CompositePixelChannel].mean+=
      channel_statistics[i].mean;
    channel_statistics[CompositePixelChannel].median+=
      channel_statistics[i].median;
    channel_statistics[CompositePixelChannel].standard_deviation+=
      channel_statistics[i].standard_deviation;
    channel_statistics[CompositePixelChannel].entropy+=
      channel_statistics[i].entropy;
  }
  channel_statistics[CompositePixelChannel].mean/=(double)
    GetImageChannels(image);
  channel_statistics[CompositePixelChannel].median/=(double)
    GetImageChannels(image);
  channel_statistics[CompositePixelChannel].standard_deviation/=(double)
    GetImageChannels(image);
  channel_statistics[CompositePixelChannel].entropy/=(double)
    GetImageChannels(image);
  /*
    y < rows means a pixel loop exited early (cache failure): report NULL.
  */
  if (y < (ssize_t) image->rows)
    channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
      channel_statistics);
  return(channel_statistics);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o l y n o m i a l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PolynomialImage() returns a new image where each pixel is the sum of the
% pixels in the image sequence after applying its corresponding terms
% (coefficient and degree pairs).
%
% The format of the PolynomialImage method is:
%
% Image *PolynomialImage(const Image *images,const size_t number_terms,
% const double *terms,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o number_terms: the number of terms in the list. The actual list length
% is 2 x number_terms + 1 (the constant).
%
% o terms: the list of polynomial coefficients and degree pairs and a
% constant.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PolynomialImage(const Image *images,
  const size_t number_terms,const double *terms,ExceptionInfo *exception)
{
#define PolynomialImageTag "Polynomial/Image"

  CacheView
    *polynomial_view;

  Image
    *image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelChannels
    **magick_restrict polynomial_pixels;  /* one accumulator row per thread */

  size_t
    number_images;

  ssize_t
    y;

  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImageCanvas(images,exception);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    {
      image=DestroyImage(image);
      return((Image *) NULL);
    }
  number_images=GetImageListLength(images);
  polynomial_pixels=AcquirePixelThreadSet(images);
  if (polynomial_pixels == (PixelChannels **) NULL)
    {
      image=DestroyImage(image);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return((Image *) NULL);
    }
  /*
    Polynomial image pixels: each output pixel is
    sum_j coefficient_j * (QuantumScale*sample_j)^degree_j over the sequence.
  */
  status=MagickTrue;
  progress=0;
  polynomial_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    CacheView
      *image_view;

    const Image
      *next;

    const int
      id = GetOpenMPThreadId();

    register ssize_t
      i,
      x;

    register PixelChannels
      *polynomial_pixel;

    register Quantum
      *magick_restrict q;

    ssize_t
      j;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(polynomial_view,0,y,image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /*
      Zero this thread's accumulator row before summing the sequence.
    */
    polynomial_pixel=polynomial_pixels[id];
    for (j=0; j < (ssize_t) image->columns; j++)
      for (i=0; i < MaxPixelChannels; i++)
        polynomial_pixel[j].channel[i]=0.0;
    next=images;
    for (j=0; j < (ssize_t) number_images; j++)
    {
      register const Quantum
        *p;

      /* images beyond the supplied term list contribute nothing */
      if (j >= (ssize_t) number_terms)
        continue;
      image_view=AcquireVirtualCacheView(next,exception);
      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const Quantum *) NULL)
        {
          image_view=DestroyCacheView(image_view);
          break;
        }
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        register ssize_t
          i;

        for (i=0; i < (ssize_t) GetPixelChannels(next); i++)
        {
          MagickRealType
            coefficient,
            degree;

          PixelChannel channel = GetPixelChannelChannel(image,i);
          PixelTrait traits = GetPixelChannelTraits(next,channel);
          PixelTrait polynomial_traits=GetPixelChannelTraits(image,channel);
          if ((traits == UndefinedPixelTrait) ||
              (polynomial_traits == UndefinedPixelTrait))
            continue;
          if ((traits & UpdatePixelTrait) == 0)
            continue;
          /* terms[] is laid out as (coefficient, degree) pairs per image */
          coefficient=(MagickRealType) terms[2*j];
          degree=(MagickRealType) terms[(j << 1)+1];
          polynomial_pixel[x].channel[i]+=coefficient*
            pow(QuantumScale*GetPixelChannel(image,channel,p),degree);
        }
        p+=GetPixelChannels(next);
      }
      image_view=DestroyCacheView(image_view);
      next=GetNextImageInList(next);
    }
    /*
      Clamp the accumulated sums back into quantum range and store them.
    */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumRange*polynomial_pixel[x].channel[i]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(polynomial_view,exception) == MagickFalse)
      status=MagickFalse;
    if (images->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(images,PolynomialImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  polynomial_view=DestroyCacheView(polynomial_view);
  polynomial_pixels=DestroyPixelThreadSet(images,polynomial_pixels);
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t a t i s t i c I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StatisticImage() makes each pixel the min / max / median / mode / etc. of
% the neighborhood of the specified width and height.
%
% The format of the StatisticImage method is:
%
% Image *StatisticImage(const Image *image,const StatisticType type,
% const size_t width,const size_t height,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: the statistic type (median, mode, etc.).
%
% o width: the width of the pixel neighborhood.
%
% o height: the height of the pixel neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Skip-list node: one per possible 16-bit color value, plus a sentinel.
*/
typedef struct _SkipNode
{
  size_t
    next[9],     /* forward links, one per skip-list level (max 9 levels) */
    count,       /* number of neighborhood samples with this color value */
    signature;   /* node is live when this matches PixelList.signature */
} SkipNode;

/*
  Skip-list over the 16-bit color values present in a neighborhood.
*/
typedef struct _SkipList
{
  ssize_t
    level;       /* current highest level in use */

  SkipNode
    *nodes;      /* 65537 nodes: 65536 colors + sentinel at index 65536 */
} SkipList;

/*
  Per-thread histogram of a pixel neighborhood, used by StatisticImage()
  for median/mode/nonpeak statistics.
*/
typedef struct _PixelList
{
  size_t
    length,      /* number of samples in the neighborhood (width*height) */
    seed;        /* LCG state for randomized skip-list levels */

  SkipList
    skip_list;

  size_t
    signature;   /* bumped on reset to lazily invalidate stale nodes */
} PixelList;
/*
  DestroyPixelList() releases the skip-list node pool and then the list
  itself; it is NULL-safe and always returns NULL.
*/
static PixelList *DestroyPixelList(PixelList *pixel_list)
{
  if (pixel_list != (PixelList *) NULL)
    {
      if (pixel_list->skip_list.nodes != (SkipNode *) NULL)
        pixel_list->skip_list.nodes=(SkipNode *)
          RelinquishAlignedMemory(pixel_list->skip_list.nodes);
      pixel_list=(PixelList *) RelinquishMagickMemory(pixel_list);
    }
  return(pixel_list);
}
/*
  DestroyPixelListThreadSet() destroys each per-thread pixel list and then
  the pointer array itself; returns NULL.
*/
static PixelList **DestroyPixelListThreadSet(PixelList **pixel_list)
{
  register ssize_t
    n;

  assert(pixel_list != (PixelList **) NULL);
  for (n=0; n < (ssize_t) GetMagickResourceLimit(ThreadResource); n++)
  {
    if (pixel_list[n] == (PixelList *) NULL)
      continue;
    pixel_list[n]=DestroyPixelList(pixel_list[n]);
  }
  pixel_list=(PixelList **) RelinquishMagickMemory(pixel_list);
  return(pixel_list);
}
/*
  AcquirePixelList() allocates a skip-list histogram sized for a
  width x height neighborhood; returns NULL on allocation failure.
*/
static PixelList *AcquirePixelList(const size_t width,const size_t height)
{
  PixelList
    *pixel_list;

  pixel_list=(PixelList *) AcquireMagickMemory(sizeof(*pixel_list));
  if (pixel_list == (PixelList *) NULL)
    return(pixel_list);
  (void) memset((void *) pixel_list,0,sizeof(*pixel_list));
  pixel_list->length=width*height;  /* samples per neighborhood */
  /*
    One node per possible 16-bit color value plus a sentinel (index 65536).
  */
  pixel_list->skip_list.nodes=(SkipNode *) AcquireAlignedMemory(65537UL,
    sizeof(*pixel_list->skip_list.nodes));
  if (pixel_list->skip_list.nodes == (SkipNode *) NULL)
    return(DestroyPixelList(pixel_list));
  (void) memset(pixel_list->skip_list.nodes,0,65537UL*
    sizeof(*pixel_list->skip_list.nodes));
  pixel_list->signature=MagickCoreSignature;
  return(pixel_list);
}
/*
  AcquirePixelListThreadSet() allocates one pixel list per worker thread
  (ThreadResource limit); returns NULL on failure, releasing any lists
  already acquired.
*/
static PixelList **AcquirePixelListThreadSet(const size_t width,
  const size_t height)
{
  PixelList
    **pixel_list;

  register ssize_t
    i;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  pixel_list=(PixelList **) AcquireQuantumMemory(number_threads,
    sizeof(*pixel_list));
  if (pixel_list == (PixelList **) NULL)
    return((PixelList **) NULL);
  /* zero first so a partial failure can be cleaned up safely */
  (void) memset(pixel_list,0,number_threads*sizeof(*pixel_list));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    pixel_list[i]=AcquirePixelList(width,height);
    if (pixel_list[i] == (PixelList *) NULL)
      return(DestroyPixelListThreadSet(pixel_list));
  }
  return(pixel_list);
}
/*
  AddNodePixelList() links a new color node (count 1) into the skip-list,
  keeping level-0 ordered by ascending color value.
*/
static void AddNodePixelList(PixelList *pixel_list,const size_t color)
{
  register SkipList
    *p;

  register ssize_t
    level;

  size_t
    search,
    update[9];

  /*
    Initialize the node.
  */
  p=(&pixel_list->skip_list);
  p->nodes[color].signature=pixel_list->signature;
  p->nodes[color].count=1;
  /*
    Determine where it belongs in the list: record, per level, the
    rightmost node whose value is below color.
  */
  search=65536UL;  /* start at the sentinel */
  for (level=p->level; level >= 0; level--)
  {
    while (p->nodes[search].next[level] < color)
      search=p->nodes[search].next[level];
    update[level]=search;
  }
  /*
    Generate a pseudo-random level for this node (LCG; each extra level
    has probability 1/4, capped below).
  */
  for (level=0; ; level++)
  {
    pixel_list->seed=(pixel_list->seed*42893621L)+1L;
    if ((pixel_list->seed & 0x300) != 0x300)
      break;
  }
  if (level > 8)
    level=8;
  if (level > (p->level+2))
    level=p->level+2;
  /*
    If we're raising the list's level, link back to the root node.
  */
  while (level > p->level)
  {
    p->level++;
    update[p->level]=65536UL;
  }
  /*
    Link the node into the skip-list.
  */
  do
  {
    p->nodes[color].next[level]=p->nodes[update[level]].next[level];
    p->nodes[update[level]].next[level]=color;
  } while (level-- > 0);
}
/*
  GetMedianPixelList() returns the median color of the neighborhood by
  walking level 0 of the skip-list (ascending color order) until the
  cumulative count passes half the sample count.
*/
static inline void GetMedianPixelList(PixelList *pixel_list,Quantum *pixel)
{
  register SkipList
    *p;

  size_t
    color;

  ssize_t
    count;

  /*
    Find the median color value.
  */
  p=(&pixel_list->skip_list);
  color=65536L;  /* start at the sentinel */
  count=0;
  do
  {
    color=p->nodes[color].next[0];
    count+=p->nodes[color].count;
  } while (count <= (ssize_t) (pixel_list->length >> 1));
  *pixel=ScaleShortToQuantum((unsigned short) color);
}
/*
  GetModePixelList() returns the most frequent color of the neighborhood
  (on a tie, the smallest such color, since the walk is ascending and only
  a strictly greater count replaces the mode).
*/
static inline void GetModePixelList(PixelList *pixel_list,Quantum *pixel)
{
  register SkipList
    *p;

  size_t
    color,
    max_count,
    mode;

  ssize_t
    count;

  /*
    Make each pixel the 'predominant color' of the specified neighborhood.
  */
  p=(&pixel_list->skip_list);
  color=65536L;  /* sentinel: its count is 0, so any real node beats it */
  mode=color;
  max_count=p->nodes[mode].count;
  count=0;
  do
  {
    color=p->nodes[color].next[0];
    if (p->nodes[color].count > max_count)
      {
        mode=color;
        max_count=p->nodes[mode].count;
      }
    count+=p->nodes[color].count;
  } while (count < (ssize_t) pixel_list->length);
  *pixel=ScaleShortToQuantum((unsigned short) mode);
}
/*
  GetNonpeakPixelList() returns the median color unless it sits at an end
  of the color list, in which case its inward neighbor is used -- i.e. the
  extreme ("peak") value is avoided when a neighbor exists.
*/
static inline void GetNonpeakPixelList(PixelList *pixel_list,Quantum *pixel)
{
  register SkipList
    *p;

  size_t
    color,
    next,
    previous;

  ssize_t
    count;

  /*
    Walk level 0 to the median, tracking its neighbors.
  */
  p=(&pixel_list->skip_list);
  color=65536L;  /* start at the sentinel */
  next=p->nodes[color].next[0];
  count=0;
  do
  {
    previous=color;
    color=next;
    next=p->nodes[color].next[0];
    count+=p->nodes[color].count;
  } while (count <= (ssize_t) (pixel_list->length >> 1));
  /* median is the smallest color: step up; largest: step down */
  if ((previous == 65536UL) && (next != 65536UL))
    color=next;
  else
    if ((previous != 65536UL) && (next == 65536UL))
      color=previous;
  *pixel=ScaleShortToQuantum((unsigned short) color);
}
/*
  InsertPixelList() buckets a pixel into its 16-bit color slot: the count
  is bumped when the slot is already live in this pass (matching
  signature); otherwise a fresh node is linked into the skip-list.
*/
static inline void InsertPixelList(const Quantum pixel,PixelList *pixel_list)
{
  unsigned short
    color;

  color=ScaleQuantumToShort(pixel);
  if (pixel_list->skip_list.nodes[color].signature != pixel_list->signature)
    {
      AddNodePixelList(pixel_list,color);
      return;
    }
  pixel_list->skip_list.nodes[color].count++;
}
/*
  ResetPixelList() empties the skip-list in O(1): the sentinel links back
  to itself and the signature is bumped, lazily invalidating every node
  inserted during the previous neighborhood.
*/
static void ResetPixelList(PixelList *pixel_list)
{
  int
    level;

  register SkipNode
    *root;

  register SkipList
    *p;

  /*
    Reset the skip-list.
  */
  p=(&pixel_list->skip_list);
  root=p->nodes+65536UL;  /* sentinel node */
  p->level=0;
  for (level=0; level < 9; level++)
    root->next[level]=65536UL;  /* empty list: sentinel points to itself */
  /* reseed the level generator from the (pre-increment) signature */
  pixel_list->seed=pixel_list->signature++;
}
MagickExport Image *StatisticImage(const Image *image,const StatisticType type,
  const size_t width,const size_t height,ExceptionInfo *exception)
{
#define StatisticImageTag "Statistic/Image"

  CacheView
    *image_view,
    *statistic_view;

  Image
    *statistic_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelList
    **magick_restrict pixel_list;  /* per-thread neighborhood histograms */

  ssize_t
    center,
    y;

  /*
    Initialize statistics image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  statistic_image=CloneImage(image,0,0,MagickTrue,
    exception);
  if (statistic_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageStorageClass(statistic_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      statistic_image=DestroyImage(statistic_image);
      return((Image *) NULL);
    }
  pixel_list=AcquirePixelListThreadSet(MagickMax(width,1),MagickMax(height,1));
  if (pixel_list == (PixelList **) NULL)
    {
      statistic_image=DestroyImage(statistic_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Make each pixel the min / max / median / mode / etc. of the neighborhood.
  */
  /* quantum offset from the neighborhood's top-left to its center pixel */
  center=(ssize_t) GetPixelChannels(image)*(image->columns+MagickMax(width,1))*
    (MagickMax(height,1)/2L)+GetPixelChannels(image)*(MagickMax(width,1)/2L);
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  statistic_view=AcquireAuthenticCacheView(statistic_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,statistic_image,statistic_image->rows,1)
#endif
  for (y=0; y < (ssize_t) statistic_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* fetch the source row padded by the neighborhood extent */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) MagickMax(width,1)/2L),y-
      (ssize_t) (MagickMax(height,1)/2L),image->columns+MagickMax(width,1),
      MagickMax(height,1),exception);
    q=QueueCacheViewAuthenticPixels(statistic_view,0,y,statistic_image->columns, 1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) statistic_image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          area,
          maximum,
          minimum,
          sum,
          sum_squared;

        Quantum
          pixel;

        register const Quantum
          *magick_restrict pixels;

        register ssize_t
          u;

        ssize_t
          v;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait statistic_traits=GetPixelChannelTraits(statistic_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (statistic_traits == UndefinedPixelTrait))
          continue;
        /* copy-only channels and masked pixels pass through unchanged */
        if (((statistic_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(image,p) <= (QuantumRange/2)))
          {
            SetPixelChannel(statistic_image,channel,p[center+i],q);
            continue;
          }
        if ((statistic_traits & UpdatePixelTrait) == 0)
          continue;
        /*
          Scan the neighborhood, accumulating either the skip-list
          histogram (median/mode/nonpeak) or the running moments.
        */
        pixels=p;
        area=0.0;
        minimum=pixels[i];
        maximum=pixels[i];
        sum=0.0;
        sum_squared=0.0;
        ResetPixelList(pixel_list[id]);
        for (v=0; v < (ssize_t) MagickMax(height,1); v++)
        {
          for (u=0; u < (ssize_t) MagickMax(width,1); u++)
          {
            if ((type == MedianStatistic) || (type == ModeStatistic) ||
                (type == NonpeakStatistic))
              {
                InsertPixelList(pixels[i],pixel_list[id]);
                pixels+=GetPixelChannels(image);
                continue;
              }
            area++;
            if (pixels[i] < minimum)
              minimum=(double) pixels[i];
            if (pixels[i] > maximum)
              maximum=(double) pixels[i];
            sum+=(double) pixels[i];
            sum_squared+=(double) pixels[i]*pixels[i];
            pixels+=GetPixelChannels(image);
          }
          /* advance to the next neighborhood row in the padded buffer */
          pixels+=GetPixelChannels(image)*image->columns;
        }
        switch (type)
        {
          case GradientStatistic:
          {
            pixel=ClampToQuantum(MagickAbsoluteValue(maximum-minimum));
            break;
          }
          case MaximumStatistic:
          {
            pixel=ClampToQuantum(maximum);
            break;
          }
          case MeanStatistic:
          default:
          {
            pixel=ClampToQuantum(sum/area);
            break;
          }
          case MedianStatistic:
          {
            GetMedianPixelList(pixel_list[id],&pixel);
            break;
          }
          case MinimumStatistic:
          {
            pixel=ClampToQuantum(minimum);
            break;
          }
          case ModeStatistic:
          {
            GetModePixelList(pixel_list[id],&pixel);
            break;
          }
          case NonpeakStatistic:
          {
            GetNonpeakPixelList(pixel_list[id],&pixel);
            break;
          }
          case RootMeanSquareStatistic:
          {
            pixel=ClampToQuantum(sqrt(sum_squared/area));
            break;
          }
          case StandardDeviationStatistic:
          {
            pixel=ClampToQuantum(sqrt(sum_squared/area-(sum/area*sum/area)));
            break;
          }
        }
        SetPixelChannel(statistic_image,channel,pixel,q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(statistic_image);
    }
    if (SyncCacheViewAuthenticPixels(statistic_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,StatisticImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  statistic_view=DestroyCacheView(statistic_view);
  image_view=DestroyCacheView(image_view);
  pixel_list=DestroyPixelListThreadSet(pixel_list);
  if (status == MagickFalse)
    statistic_image=DestroyImage(statistic_image);
  return(statistic_image);
}
|
enhancement.h | /*
* Copyright 2018 Benjamin Santos <caos21@gmail.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef ENHANCEMENT_H
#define ENHANCEMENT_H
#include <algorithm>
#include <chrono>
#include <cstdlib>
#include <ctime>
#include <fstream>
#include <functional>
#include <iostream>
#include <memory>
#include <parallel/algorithm>
#include <vector>
#include <sstream>
#include <string>
#include <utility>
#include <omp.h>
#include <boost/algorithm/string.hpp>
#include <boost/lexical_cast.hpp>
#include "array.h"
#include "constants.h"
#include "eint.h"
#include "log.h"
#include "utils.h"
#define MPC_ERROR 1.0 // 0.4
#define ETA_TOL 1.0e-16
using namespace utils;
typedef std::pair<short, short> pp;
typedef std::pair<pp, pp> pairs;
typedef std::vector<short> vs;
typedef std::vector<pp> vp;
typedef std::vector<pairs> vpairs;
namespace enhancement {
const boost::uintmax_t ROOT_MAXITER = 1000;
const tools::eps_tolerance<double> ROOT_TOL(30);
/*!
* class PairElement
*
* Represents a value for a particle pair combination (l,q)+(m,p)
*
*/
class PairElement {
 public:
  //! Default constructor: zero indices and zero value.
  PairElement() {
    l_ = 0;
    q_ = 0;
    m_ = 0;
    p_ = 0;
    value_ = 0.0;
  }

  //! Construct from the four pair indices and the stored value.
  PairElement(const short &l, const short &q, const short &m, const short &p,
              const double &value)
      : l_(l), q_(q), m_(m), p_(p), value_(value) {}

  //! Copy constructor.
  PairElement(const PairElement &pe) { assign(pe); }

  short l_;       //!< Index l of coalescing particle 1
  short q_;       //!< Index q of coalescing particle 1
  short m_;       //!< Index m of coalescing particle 2
  short p_;       //!< Index p of coalescing particle 2
  double value_;  //!< Value

  //! Copy every field from pe.
  void assign(const PairElement &pe) {
    l_ = pe.l_;
    q_ = pe.q_;
    m_ = pe.m_;
    p_ = pe.p_;
    value_ = pe.value_;
  }

  friend void fill_pe(PairElement &pe, const short &l, const short &q,
                      const short &m, const short &p, const double &value);
  friend void pairelement_tostream(const PairElement &pe, std::ostream &stream);
  // FIX: this friend previously declared a bogus overload
  // pairelement_tostream(PairElement&, std::istringstream&) that is defined
  // nowhere; the stream-parsing counterpart is pairelement_fromstream.
  friend void pairelement_fromstream(PairElement &pe,
                                     std::istringstream &stream);

  //! Write this element to stream as tab-separated fields.
  void print(std::ostream &stream = std::cout) {
    pairelement_tostream(*this, stream);
  }
};
//! Unpack all five fields of pe into the given output references.
inline void get_pe(const PairElement &pe, short &l, short &q, short &m,
                   short &p, double &value) {
  l = pe.l_;
  q = pe.q_;
  m = pe.m_;
  p = pe.p_;
  value = pe.value_;
}
//! Overwrite every field of pe with the given indices and value.
inline void fill_pe(PairElement &pe, const short &l, const short &q,
                    const short &m, const short &p, const double &value) {
  // The 5-argument constructor sets all fields and copy assignment writes
  // them back, exactly as field-by-field assignment would.
  pe = PairElement(l, q, m, p, value);
}
//! Serialize pe as tab-separated fields: l, q, m, p, value (no newline).
inline void pairelement_tostream(const PairElement &pe, std::ostream &stream) {
  stream << pe.l_ << '\t' << pe.q_ << '\t' << pe.m_ << '\t' << pe.p_ << '\t'
         << pe.value_;
}
//! Parse the five whitespace-separated tokens written by
//! pairelement_tostream (l, q, m, p, value) and store them in pe.
inline void pairelement_fromstream(PairElement &pe,
                                   std::istringstream &stream) {
  std::string tok_l;
  std::string tok_q;
  std::string tok_m;
  std::string tok_p;
  std::string tok_value;
  stream >> tok_l >> tok_q >> tok_m >> tok_p >> tok_value;
  // Same conversions as before: lexical_cast for the shorts, stod for the
  // value, so failure behavior (exceptions) is unchanged.
  const short l = boost::lexical_cast<short>(tok_l);
  const short q = boost::lexical_cast<short>(tok_q);
  const short m = boost::lexical_cast<short>(tok_m);
  const short p = boost::lexical_cast<short>(tok_p);
  const double value = std::stod(tok_value);
  fill_pe(pe, l, q, m, p, value);
}
//
//! A concrete particle pair (l,q)+(m,p) with its radius and charge ratios.
class ParticlePair {
 public:
  //! Default constructor: zero all indices and ratios, notswapd_ = true.
  ParticlePair() {
    id_ = 0;
    l_ = 0;
    q_ = 0;
    m_ = 0;
    p_ = 0;
    r21_ = 0.0;
    q21_ = 0.0;
    notswapd_ = true;
  }

  //! Construct from the pair id, section indices and ratios.
  ParticlePair(const unsigned long &id, const short &l, const short &q,
               const short &m, const short &p, const double &r21,
               const double &q21, const bool &notswapd)
      : id_(id),
        l_(l),
        q_(q),
        m_(m),
        p_(p),
        r21_(r21),
        q21_(q21),
        notswapd_(notswapd) {}

  //! Copy constructor.
  ParticlePair(const ParticlePair &pp) { assign(pp); }

  unsigned long id_;  //!< Index
  short l_;           //!< Index l of coalescing particle 1
  short q_;           //!< Index q of coalescing particle 1
  short m_;           //!< Index m of coalescing particle 2
  short p_;           //!< Index p of coalescing particle 2
  double r21_;        //!< Value for radius fraction r2/r1
  double q21_;        //!< Value for charge fraction q2/q1
  bool notswapd_;     //!< false if indices were swapped

  //! Copy every field from pp.
  void assign(const ParticlePair &pp) {
    id_ = pp.id_;
    l_ = pp.l_;
    q_ = pp.q_;
    m_ = pp.m_;
    p_ = pp.p_;
    r21_ = pp.r21_;
    q21_ = pp.q21_;
    notswapd_ = pp.notswapd_;
  }

  friend void fill_ppair(ParticlePair &pp, const unsigned long &id,
                         const short &l, const short &q, const short &m,
                         const short &p, const double &r21, const double &q21,
                         const bool &notswapd);
  friend void particlepair_tostream(const ParticlePair &pp,
                                    std::ostream &stream);
  friend void particlepair_fromstream(ParticlePair &pp,
                                      std::istringstream &stream);

  //! Write this pair to stream as tab-separated fields.
  void print(std::ostream &stream = std::cout) {
    particlepair_tostream(*this, stream);
  }
};
//! Overwrite every field of pp with the given id, indices and ratios.
inline void fill_ppair(ParticlePair &pp, const unsigned long &id,
                       const short &l, const short &q, const short &m,
                       const short &p, const double &r21, const double &q21,
                       const bool &notswapd) {
  // The 8-argument constructor sets all fields and copy assignment writes
  // them back, exactly as field-by-field assignment would.
  pp = ParticlePair(id, l, q, m, p, r21, q21, notswapd);
}
//! Serialize pp as tab-separated fields: id, l, q, m, p, r21, q21, notswapd.
inline void particlepair_tostream(const ParticlePair &pp,
                                  std::ostream &stream) {
  stream << pp.id_ << '\t' << pp.l_ << '\t' << pp.q_ << '\t' << pp.m_ << '\t'
         << pp.p_ << '\t' << pp.r21_ << '\t' << pp.q21_ << '\t' << pp.notswapd_;
}
//! Parse the eight whitespace-separated tokens written by
//! particlepair_tostream (id, l, q, m, p, r21, q21, notswapd) into pp.
inline void particlepair_fromstream(ParticlePair &pp,
                                    std::istringstream &stream) {
  std::string tok_id;
  std::string tok_l;
  std::string tok_q;
  std::string tok_m;
  std::string tok_p;
  std::string tok_r21;
  std::string tok_q21;
  std::string tok_swap;
  stream >> tok_id >> tok_l >> tok_q >> tok_m >> tok_p >> tok_r21 >> tok_q21 >>
      tok_swap;
  // Same conversions as before: lexical_cast for the integral/bool fields,
  // stod for the ratios, so failure behavior (exceptions) is unchanged.
  const unsigned long id = boost::lexical_cast<unsigned long>(tok_id);
  const short l = boost::lexical_cast<short>(tok_l);
  const short q = boost::lexical_cast<short>(tok_q);
  const short m = boost::lexical_cast<short>(tok_m);
  const short p = boost::lexical_cast<short>(tok_p);
  const double r21 = std::stod(tok_r21);
  const double q21 = std::stod(tok_q21);
  const bool notswapd = boost::lexical_cast<bool>(tok_swap);
  fill_ppair(pp, id, l, q, m, p, r21, q21, notswapd);
}
//! A particle pair reduced to its (r21, q21) fraction values.
//! Several full ParticlePair entries can share the same fractions; the
//! ids of those duplicates are collected in repetitions_.
class ReducedParticlePair {
 public:
  //! Default constructor: zero every field via fill_rppair.
  ReducedParticlePair() { fill_rppair(*this, 0, 0.0, 0.0); }
  //! Copy constructor delegates to assign().
  ReducedParticlePair(const ReducedParticlePair &rpp) { assign(rpp); }
  //! Lexicographic order on (r21_, q21_).
  bool operator<(const ReducedParticlePair &rpp) const {
    return (r21_ != rpp.r21_) ? (r21_ < rpp.r21_) : (q21_ < rpp.q21_);
  }
  //! Equal when both fractions match exactly (id_ is ignored).
  inline bool operator==(const ReducedParticlePair &rpp) const {
    return (r21_ == rpp.r21_) && (q21_ == rpp.q21_);
  }
  unsigned long id_;               //!< Index
  double r21_;                     //!< Value for radius fraction r2/r1
  double q21_;                     //!< Value for charge fraction q2/q1
  std::vector<long> repetitions_;  //!< ids of the ParticlePair duplicates
  //! Field-by-field copy from rpp.
  void assign(const ReducedParticlePair &rpp) {
    id_ = rpp.id_;
    r21_ = rpp.r21_;
    q21_ = rpp.q21_;
    repetitions_ = rpp.repetitions_;
  }
  friend void reducedparticlepair_tostream(const ReducedParticlePair &rpp,
                                           std::ostream &stream);
  friend void reducedparticlepair_fromstream(ReducedParticlePair &rpp,
                                             std::istream &stream);
  //! Stream *this via reducedparticlepair_tostream (defaults to stdout).
  void print(std::ostream &stream = std::cout) {
    reducedparticlepair_tostream(*this, stream);
  }
  friend void fill_rppair(ReducedParticlePair &rpp, unsigned long id_,
                          double r21, double q21);
  friend void fill_rppair(ReducedParticlePair &rpp, unsigned long id_,
                          double r21, double q21,
                          std::vector<long> repetitions);
  friend void fill_rppair(ReducedParticlePair &rpp, unsigned long id_,
                          double r21, double q21, long irepeated);
  //! Record the id of one more duplicate pair.
  void fill_repeated(long const &irepeated) {
    repetitions_.push_back(irepeated);
  }
};
// bool operator == (const ReducedParticlePair& lx,
// const ReducedParticlePair& rx) {
// // return ((lx.r21_==rx.r21_) && (lx.q21_==rx.q21_));
// if ((lx.r21_==rx.r21_) && (lx.q21_==rx.q21_)) {
// return true;
// }
// else {
// return false;
// }
// }
//! A reduced pair matches a full pair when both the radius and the
//! charge fraction are exactly equal.
inline bool operator==(const ReducedParticlePair &lr, const ParticlePair &rp) {
  return (lr.r21_ == rp.r21_) && (lr.q21_ == rp.q21_);
}
//! Sets id and both fraction values of rpp; repetitions_ is not touched.
inline void fill_rppair(ReducedParticlePair &rpp, unsigned long id, double r21,
                        double q21) {
  // the assignments are independent; any order is fine
  rpp.q21_ = q21;
  rpp.r21_ = r21;
  rpp.id_ = id;
}
//! Sets id, both fraction values and the duplicate-id list of rpp.
//! The list replaces any previous contents of rpp.repetitions_.
inline void fill_rppair(ReducedParticlePair &rpp, unsigned long id, double r21,
                        double q21, std::vector<long> repetitions) {
  rpp.id_ = id;
  rpp.r21_ = r21;
  rpp.q21_ = q21;
  // `repetitions` is already a by-value copy owned by this function, so
  // swap it into the member instead of copying its elements a second time
  rpp.repetitions_.swap(repetitions);
}
//! Sets id and both fraction values of rpp, then appends a single
//! duplicate id to its repetition list.
inline void fill_rppair(ReducedParticlePair &rpp, unsigned long id, double r21,
                        double q21, long irepeated) {
  rpp.id_ = id;
  rpp.r21_ = r21;
  rpp.q21_ = q21;
  // grow the list; existing entries are kept
  rpp.repetitions_.push_back(irepeated);
}
//! Strict weak ordering on (r21, q21), used to sort reduced pairs.
struct reducedPairsComparison {
  bool operator()(const ReducedParticlePair &lx,
                  const ReducedParticlePair &rx) const {
    return (lx.r21_ != rx.r21_) ? (lx.r21_ < rx.r21_) : (lx.q21_ < rx.q21_);
  }
};
//! Writes rpp as "id<TAB>r21<TAB>q21<TAB>" followed by every duplicate
//! id, each terminated by a comma (no trailing newline).
inline void reducedparticlepair_tostream(const ReducedParticlePair &rpp,
                                         std::ostream &stream) {
  stream << rpp.id_ << '\t' << rpp.r21_ << '\t' << rpp.q21_ << '\t';
  for (const long &rep : rpp.repetitions_) {
    stream << rep << ",";
  }
}
//! Parses one record written by reducedparticlepair_tostream: id, r21,
//! q21 and a comma-separated repetition list. Parsing of the list stops
//! at the first token that is not a valid long (the trailing comma
//! always yields one empty token).
inline void reducedparticlepair_fromstream(ReducedParticlePair &rpp,
                                           std::istream &stream) {
  std::string tok_id;
  std::string tok_r21;
  std::string tok_q21;
  std::string tok_rep;
  stream >> tok_id >> tok_r21 >> tok_q21 >> tok_rep;
  const unsigned long id = boost::lexical_cast<unsigned long>(tok_id);
  const double r21 = std::stod(tok_r21);
  const double q21 = std::stod(tok_q21);
  // split the repetition list on commas and convert each token
  std::vector<std::string> rep_tokens;
  boost::split(rep_tokens, tok_rep, boost::is_any_of(","));
  std::vector<long> repetitions;
  for (const auto &tok : rep_tokens) {
    try {
      repetitions.push_back(boost::lexical_cast<long>(tok));
    } catch (const boost::bad_lexical_cast &) {
      // empty/invalid token ends the list
      break;
    }
  }
  fill_rppair(rpp, id, r21, q21, repetitions);
}
//! Interaction potential evaluated at particle-particle contact,
//! together with the pair id and the number of expansion terms n used
//! to compute it.
class ContactPotential {
 public:
  //! Construct from id, potential value and term count (all default 0).
  ContactPotential(const unsigned long &id = 0, const double &potential = 0,
                   const short &n = 0)
      : id_(id), potential_(potential), n_(n) {}
  //! Stream *this via contactpotential_tostream (defaults to stdout).
  void print(std::ostream &stream = std::cout) {
    contactpotential_tostream(*this, stream);
  }
  friend void fill_contact_potential(ContactPotential &cpot,
                                     const unsigned long &id,
                                     const double &potential, const short &n);
  friend void contactpotential_tostream(const ContactPotential &cpot,
                                        std::ostream &stream);
  unsigned long id_;  //!< pair identifier
  double potential_;  //!< potential value at contact
  short n_;           //!< number of expansion terms used
};
//! Overwrites all three fields of cpot.
inline void fill_contact_potential(ContactPotential &cpot,
                                   const unsigned long &id,
                                   const double &potential, const short &n) {
  // independent assignments; order is irrelevant
  cpot.n_ = n;
  cpot.potential_ = potential;
  cpot.id_ = id;
}
//! Writes cpot on a fresh line as "id<TAB>potential<TAB>n".
inline void contactpotential_tostream(const ContactPotential &cpot,
                                      std::ostream &stream) {
  stream << '\n';
  stream << cpot.id_ << '\t' << cpot.potential_ << '\t' << cpot.n_;
}
//! Unary predicate matching a ContactPotential by id (e.g. for
//! std::find_if over a vector of contact potentials).
struct find_id {
  unsigned long id;  //!< id searched for
  find_id(const unsigned long &id_) : id(id_) {}
  bool operator()(const ContactPotential &cpot) const {
    return cpot.id_ == id;
  }
};
typedef std::vector<ContactPotential>::iterator ContactPotentialIterator;
class Enhancement {
public:
// Enhancement() {
//}
// Full constructor: binds the radius/charge grids and all four result
// grids (enhancement factors, contact potentials, barrier potentials,
// barrier positions), then sets the defaults for the multipolar
// expansion sweep.
Enhancement(const darray &rarray_, const darray &qarray_,
            boost_array4d_ref efactor_, boost_array4d_ref cpotentials_,
            boost_array4d_ref bpotentials_, boost_array4d_ref rbarriers_,
            double eps_, src::severity_logger<severity_level> lg_)
    : rarray(rarray_),
      qarray(qarray_),
      efactor(efactor_),
      cpotentials(cpotentials_),
      bpotentials(bpotentials_),
      rbarriers(rbarriers_),
      eps(eps_),
      lg(lg_) {
  rarray_size = static_cast<unsigned short>(rarray.size());
  qarray_size = static_cast<unsigned short>(qarray.size());
  // number of unordered combinations of the k = rarray_size*qarray_size
  // grid particles: k*(k+1)/2
  ncombs =
      ((rarray_size * qarray_size) * (rarray_size * qarray_size + 1)) / 2;
  particle_pairs.reserve(ncombs);
  potential_threshold = 0.0;
  // term sweep bounds for the multipolar expansion: nmin..nmax, step nstep
  nmin = 25;
  nmax = 2000;
  nstep = 5;
  // presumably the Hamaker constant for the vdW term (passed as AH to
  // the eint functors) -- TODO confirm units
  AH = 20e-20;
}
Enhancement(const darray &rarray_, const darray &qarray_, double eps_,
src::severity_logger<severity_level> lg_)
: rarray(rarray_),
qarray(qarray_),
efactor(dummy), // Hack, references must belong to an actual object
cpotentials(dummy),
bpotentials(dummy),
rbarriers(dummy),
eps(eps_),
lg(lg_) {
rarray_size = static_cast<unsigned short>(rarray.size());
qarray_size = static_cast<unsigned short>(qarray.size());
ncombs =
((rarray_size * qarray_size) * (rarray_size * qarray_size + 1)) / 2;
particle_pairs.reserve(ncombs);
potential_threshold = 0.0;
nmin = 25;
nmax = 2000;
nstep = 5;
}
Enhancement(const darray &rarray_, const darray &qarray_,
boost_array4d_ref efactor_, double eps_,
src::severity_logger<severity_level> lg_)
: rarray(rarray_),
qarray(qarray_),
efactor(efactor_), // Hack, references must belong to an actual object
cpotentials(dummy),
bpotentials(dummy),
rbarriers(dummy),
eps(eps_),
lg(lg_) {
rarray_size = static_cast<unsigned short>(rarray.size());
qarray_size = static_cast<unsigned short>(qarray.size());
ncombs =
((rarray_size * qarray_size) * (rarray_size * qarray_size + 1)) / 2;
particle_pairs.reserve(ncombs);
potential_threshold = 0.0;
nmin = 25;
nmax = 2000;
nstep = 5;
}
//! Dumps the charged pairs, neutral pairs and reduced pairs to three
//! text files ("<base>_pp.dat", "<base>_np.dat", "<base>_rp.dat"), each
//! with a '#' header line and one record per line. Always returns 0.
inline int write_particlepairs(std::string ppfilename) {
  // charged particle pairs
  std::ofstream ppstream(std::string(ppfilename + "_pp.dat"));
  ppstream << "#id\tl\tq\tm\tp\tr21\tq21\tnotswapd";
  for (const auto &pair : particle_pairs) {
    ppstream << '\n';
    particlepair_tostream(pair, ppstream);
  }
  ppstream.close();
  // neutral particle pairs (same record layout)
  std::ofstream npstream(std::string(ppfilename + "_np.dat"));
  npstream << "#id\tl\tq\tm\tp\tr21\tq21\tnotswapd";
  for (const auto &pair : neutral_pairs) {
    npstream << '\n';
    particlepair_tostream(pair, npstream);
  }
  npstream.close();
  // reduced pairs
  std::ofstream rpstream(std::string(ppfilename + "_rp.dat"));
  rpstream << "#id\tr21\tq21\trepetitions";
  for (const auto &pair : reduced_pairs) {
    rpstream << '\n';
    reducedparticlepair_tostream(pair, rpstream);
  }
  rpstream.close();
  return 0;
}
//! Loads the charged, neutral and reduced pair vectors back from the
//! three files written by write_particlepairs. When the first line does
//! not contain '#' the stream is rewound and that line is parsed as
//! data. Finally resizes contact_potentials to match the reduced pairs.
//! Always returns 0.
inline int read_particlepairs(std::string ppfilename) {
  // charged particle pairs
  {
    particle_pairs.clear();
    std::ifstream ppstream(std::string(ppfilename + "_pp.dat"));
    std::string line;
    std::getline(ppstream, line);
    if (line.find("#") == std::string::npos) {
      // no comment header: rewind and parse from the beginning
      ppstream.clear();
      ppstream.seekg(0);
    }
    while (std::getline(ppstream, line)) {
      ParticlePair pp;
      std::istringstream iss(line);
      particlepair_fromstream(pp, iss);
      particle_pairs.push_back(pp);
    }
    ppstream.close();
    BOOST_LOG_SEV(lg, info)
        << "Particle pair size = " << particle_pairs.size();
  }
  // neutral particle pairs
  {
    neutral_pairs.clear();
    std::ifstream npstream(std::string(ppfilename + "_np.dat"));
    std::string line;
    std::getline(npstream, line);
    if (line.find("#") == std::string::npos) {
      // no comment header: rewind and parse from the beginning
      npstream.clear();
      npstream.seekg(0);
    }
    while (std::getline(npstream, line)) {
      ParticlePair np;
      std::istringstream iss(line);
      particlepair_fromstream(np, iss);
      neutral_pairs.push_back(np);
    }
    npstream.close();
    BOOST_LOG_SEV(lg, info) << "Neutral pair size = " << neutral_pairs.size();
  }
  // reduced pairs
  {
    reduced_pairs.clear();
    std::ifstream rpstream(std::string(ppfilename + "_rp.dat"));
    std::string line;
    std::getline(rpstream, line);
    if (line.find("#") == std::string::npos) {
      // no comment header: rewind and parse from the beginning
      rpstream.clear();
      rpstream.seekg(0);
    }
    while (std::getline(rpstream, line)) {
      ReducedParticlePair rp;
      std::istringstream iss(line);
      reducedparticlepair_fromstream(rp, iss);
      reduced_pairs.push_back(rp);
    }
    rpstream.close();
    BOOST_LOG_SEV(lg, info) << "Reduced pair size = " << reduced_pairs.size();
  }
  // one contact-potential slot per reduced pair
  if (reduced_pairs.size() > 0) {
    contact_potentials.resize(reduced_pairs.size());
  }
  return 0;
}
// Enumerates every unordered combination of grid particles (l,q) x
// (m,p), normalizes each pair so that r21 = r2/r1 < 1 (swapping the
// particles when needed, and again when q1 == 0 so the reference
// particle is charged), splits the pairs into charged and neutral
// lists, and finally collapses the charged ones into unique (r21, q21)
// "reduced" pairs, each carrying the ids of all duplicates it stands
// for. Also resizes contact_potentials to one slot per reduced pair.
inline void compute_reducedpairs() {
  BOOST_LOG_SEV(lg, info) << "Computing particle pairs...";
  auto start = std::chrono::system_clock::now();
  //
  unsigned long idpp = 0; // index for particle pairs
  unsigned long idnp = 0; // index for neutral pairs
  // #pragma omp parallel for collapse(4) schedule(auto)
  for (unsigned int l = 0; l < rarray_size; ++l) {
    // iterate in charges particle 1
    for (unsigned int q = 0; q < qarray_size; ++q) {
      // iterate in radii particle 2
      for (unsigned int m = 0; m < rarray_size; ++m) {
        // iterate in charges particle 2
        for (unsigned int p = 0; p < qarray_size; ++p) {
          // flat particle numbers used to keep only one of each
          // unordered combination
          unsigned int ip1 = q * rarray_size + l;
          unsigned int ip2 = p * rarray_size + m;
          // avoid repetitions
          if ((p >= q) && (ip2 >= ip1)) {
            double r1 = rarray[l];
            double q1 = qarray[q];
            double r2 = rarray[m];
            double q2 = qarray[p];
            unsigned int lp1 = l;
            unsigned int qp1 = q;
            unsigned int mp2 = m;
            unsigned int pp2 = p;
            bool notswapd = true;
            // swap particles. We want to keep r21 < 1
            if (r2 > r1) {
              std::swap(r1, r2);
              std::swap(q1, q2);
              std::swap(lp1, mp2);
              std::swap(qp1, pp2);
              notswapd = false;
            }
            //double r21 = r2 / r1;
            double q21 = q2 / q1;
            // WARNING float comparison
            if (q1 == 0.0) {
              // q1=0, permute particle 1 with 2 so the denominator
              // particle carries the charge; q21 is forced to 0
              std::swap(r1, r2);
              std::swap(q1, q2);
              std::swap(lp1, mp2);
              std::swap(qp1, pp2);
              q21 = 0.0;
              notswapd = notswapd | false;
            }
            // radius fraction is computed after any swap above
            double r21 = r2 / r1;
            // if(q2==0.0){
            //   q21 = 0.0;
            // }
            // all neutrals case: track separately and skip the charged
            // pair list
            if ((q1 == 0.0) && (q2 == 0.0)) {
              ParticlePair neutralpair(idnp, lp1, qp1, mp2, pp2, r21, q21,
                                       notswapd);
              neutral_pairs.push_back(neutralpair);
              ++idnp;
              continue; // break;
            }
            ParticlePair ppair(idpp, lp1, qp1, mp2, pp2, r21, q21, notswapd);
            particle_pairs.push_back(ppair);
            ++idpp;
            // write symmetric combination
          }
        }
      }
    }
  }
  auto end = std::chrono::system_clock::now();
  std::chrono::duration<double> elapsed_seconds = end - start;
  BOOST_LOG_SEV(lg, info) << "Elapsed time : " << elapsed_seconds.count();
  BOOST_LOG_SEV(lg, info) << "Pairs size : " << particle_pairs.size();
  BOOST_LOG_SEV(lg, info) << "Neutral pairs size : " << neutral_pairs.size();
  BOOST_LOG_SEV(lg, info) << "Total pairs size : "
                          << particle_pairs.size() + neutral_pairs.size();
  BOOST_LOG_SEV(lg, info) << "Total combinations : " << ncombs;
  BOOST_LOG_SEV(lg, info) << "Computing reduced pairs...";
  // start from one reduced pair per charged pair (id + fractions only)
  reduced_pairs.resize(particle_pairs.size());
  start = std::chrono::system_clock::now();
  for (unsigned int i = 0; i < particle_pairs.size(); ++i) {
    reduced_pairs[i].id_ = particle_pairs[i].id_;
    reduced_pairs[i].r21_ = particle_pairs[i].r21_;
    reduced_pairs[i].q21_ = particle_pairs[i].q21_;
  }
  end = std::chrono::system_clock::now();
  elapsed_seconds = end - start;
  BOOST_LOG_SEV(lg, info) << "Elapsed time : " << elapsed_seconds.count();
  start = std::chrono::system_clock::now();
  BOOST_LOG_SEV(lg, info) << "Sorting...";
  // sort by (r21, q21) so equal fractions become adjacent
  std::sort(reduced_pairs.begin(), reduced_pairs.end(),
            reducedPairsComparison());
  end = std::chrono::system_clock::now();
  elapsed_seconds = end - start;
  BOOST_LOG_SEV(lg, info) << "Elapsed time : " << elapsed_seconds.count();
  // keep the sorted list WITH duplicates; used below to collect the ids
  // of every duplicate of each unique reduced pair
  std::vector<ReducedParticlePair> sorted_reducedpairs = reduced_pairs;
  BOOST_LOG_SEV(lg, info) << "Erasing...";
  // drop adjacent duplicates (equality compares only r21/q21)
  reduced_pairs.erase(std::unique(reduced_pairs.begin(), reduced_pairs.end()),
                      reduced_pairs.end());
  // shrink-to-fit via the copy-and-swap idiom
  std::vector<ReducedParticlePair> tmp = reduced_pairs;
  reduced_pairs.swap(tmp);
  // clear memory for tmp
  std::vector<ReducedParticlePair>().swap(tmp);
  //****************************************************************************
  // Add indices of repeated to vector repetitions
  //****************************************************************************
  BOOST_LOG_SEV(lg, info) << "Reduced pairs size : " << reduced_pairs.size();
  start = std::chrono::system_clock::now();
  // both vectors are sorted identically, so the duplicates of each
  // unique pair form a contiguous run starting at jstart
  long jstart = 0;
  for (unsigned int irp = 0; irp < reduced_pairs.size(); ++irp) {
    for (long jpp = jstart; jpp < sorted_reducedpairs.size(); ++jpp) {
      if (reduced_pairs[irp] == sorted_reducedpairs[jpp]) {
        // record the id of this duplicate
        reduced_pairs[irp].fill_repeated(sorted_reducedpairs[jpp].id_);
      } else {
        // run ended: the next unique pair starts here
        jstart = jpp;
        break;
      }
    }
  }
  end = std::chrono::system_clock::now();
  elapsed_seconds = end - start;
  BOOST_LOG_SEV(lg, info) << "Elapsed time : " << elapsed_seconds.count();
  start = std::chrono::system_clock::now();
  // resize contact potentials
  if (reduced_pairs.size() > 0) {
    contact_potentials.resize(reduced_pairs.size());
  }
  end = std::chrono::system_clock::now();
  elapsed_seconds = end - start;
  BOOST_LOG_SEV(lg, info) << "Elapsed time : " << elapsed_seconds.count();
  BOOST_LOG_SEV(lg, info) << "Done computing pairs.";
}
// Fills cpotentials with the IPA+vdW interaction potential evaluated at
// contact (r = r1 + r2) for every (l,q,m,p) grid combination.
// @param cutoff series cutoff forwarded to the potential functor
// @param vdw    van der Waals scaling prefactor (default 1.0)
inline
void compute_ipavdwpotential_contact(double cutoff, double vdw=1.0) {
  BOOST_LOG_SEV(lg, info) << "Computing IPA+vdW at contact...";
  // (removed unused pair counters and an unused timing variable)
  // iterate radii/charges of particle 1 (l,q) and particle 2 (m,p)
  for (unsigned int l = 0; l < rarray_size; ++l) {
    for (unsigned int q = 0; q < qarray_size; ++q) {
      for (unsigned int m = 0; m < rarray_size; ++m) {
        for (unsigned int p = 0; p < qarray_size; ++p) {
          double r1 = rarray[l];
          double q1 = qarray[q];
          double r2 = rarray[m];
          double q2 = qarray[p];
          double rt = r1 + r2;  // contact distance
          eint::potential_ipavdw_funct pipavdw(r1, q1, r2, q2, eps, AH,
                                               cutoff, 1.0, vdw);
          cpotentials[l][q][m][p] = pipavdw(rt);
        }
      }
    }
  }
  BOOST_LOG_SEV(lg, info) << "Done computing IPA+vdW potential contact.";
}
//! Maps a (volume section l, charge section q) pair to a flat particle
//! number, with L volume sections per charge row: index = l + L*q.
inline
short particle_number(short l, short q, short L) {
  const int flat_index = l + L * q;
  return flat_index;
}
// Barrier search for one (l,q,m,p) combination of the MPC+vdW
// interaction (worker routine for iterate_symmetric). If the force
// changes sign between contact (r1+r2) and 100x contact, the zero is
// bracketed with TOMS-748; the potential there is stored in bpotentials
// and the position in rbarriers, mirrored into the symmetric (m,p,l,q)
// entry. Without a sign change the entries are left untouched.
inline
void mpcvdwpotential_barrier(short l, short q,
                             short m, short p,
                             double cutoff) {
  double r1 = rarray[l];
  double q1 = qarray[q];
  double r2 = rarray[m];
  double q2 = qarray[p];
  double rmin = r1+r2;
  // far-field end of the bracketing interval
  double rmax = 100.0*rmin;
  double min = rmin;
  double max = rmax;
  // number of multipolar terms fitted earlier for this combination
  short n = nterms4d[l][q][m][p];
  eint::force_mpcvdw_funct fmpcvdwfunct(r1, q1, r2, q2, eps, n, AH, cutoff, 1.0/*mpc*/, 1.0/*vdw*/);
  // force mpc at contact
  double fmpcvdw_rmin = fmpcvdwfunct(rmin);
  // Force at r max
  double fmpcvdw_rmax = fmpcvdwfunct(rmax);
  // checks if minimum exists (sign change of the force)
  if (fmpcvdw_rmin * fmpcvdw_rmax < 0.0) {
    boost::uintmax_t bmax_iter = ROOT_MAXITER;
    tools::eps_tolerance<double> tol = ROOT_TOL;
    std::pair<double, double> pair_fmpcvdw;
    pair_fmpcvdw = tools::toms748_solve(fmpcvdwfunct, min, max, fmpcvdw_rmin,
                                        fmpcvdw_rmax, tol, bmax_iter);
    // guard against a root search that failed to converge
    if (bmax_iter > 990) {
      std::cerr << "\n ERROR max iter " << bmax_iter << "\n\n";
      std::terminate();
    }
    // midpoint of the final bracket approximates the barrier position
    double rbarrier = 0.5 * (pair_fmpcvdw.first + pair_fmpcvdw.second);
    if (rbarrier >= min) {
      //*********************** USE SAME COEFFICIENTS OF FORCE
      // potential functor built with the same n as the force above
      eint::potential_mpcvdw_funct pmpcvdwfunct(r1, q1, r2, q2, eps, n, AH, cutoff, 1.0/*mpc*/, 1.0/*vdw*/);
      // potential at the barrier position
      double pmpcvdw_rb = pmpcvdwfunct(rbarrier);
      bpotentials[l][q][m][p] = pmpcvdw_rb;
      rbarriers[l][q][m][p] = rbarrier;
      // mirror into the symmetric entry (pair order does not matter)
      bpotentials[m][p][l][q] = bpotentials[l][q][m][p];
      rbarriers[m][p][l][q] = rbarriers[l][q][m][p];
    }
    else {
      std::cerr << "\n ERROR Negative rbarrier " << rbarrier << '\n';
      std::terminate();
    }
  }
}
// Contact-potential fit for one (l,q,m,p) combination (worker routine
// for iterate_symmetric). Starting from the IPA+vdW value as seed,
// increase the number of multipolar expansion terms n from nmin to nmax
// in steps of nstep until two consecutive evaluations agree within
// MPC_ERROR; store the converged potential and n in cpotentials /
// nterms4d, mirrored into the symmetric (m,p,l,q) entry.
inline
void mpcvdwpotential_contact(short l, short q,
                             short m, short p,
                             double cutoff) {
  double r1 = rarray[l];
  double q1 = qarray[q];
  double r2 = rarray[m];
  double q2 = qarray[p];
  double rt = r1+r2;
  eint::potential_ipavdw_funct pipavdw(r1, q1, r2, q2, eps, AH,
                                       cutoff, 1.0, 1.0);
  // ipa + vdw at contact
  double pipavdw_rt = pipavdw(rt);
  double pcomp = pipavdw_rt;
  unsigned short initer = 0;
  for (unsigned short n = nmin; n <= nmax; n += nstep, ++initer) {
    // mpc functor
    eint::potential_mpcvdw_funct pmpcvdwfunct(r1, q1, r2, q2, eps, n,
                                              AH, cutoff, 1.0/*mpc*/, 1.0/*vdw*/);
    // mpc at contact
    double pmpcvdw_rt = pmpcvdwfunct(rt);
    // relative difference against the previous iteration's value
    double error_comp = max_pct_error(pcomp, pmpcvdw_rt);
    // initer > 0 prevents declaring convergence on the very first MPC
    // value, which is compared against the IPA seed only
    if ((error_comp < MPC_ERROR) && (initer > 0)) {
      cpotentials[l][q][m][p] = pmpcvdw_rt;
      nterms4d[l][q][m][p] = n;
      cpotentials[m][p][l][q] = cpotentials[l][q][m][p];
      nterms4d[m][p][l][q] = nterms4d[l][q][m][p];
      // converged: setting n = nmax makes the next increment exit the loop
      n = nmax;
    }
    else {
      if (n > nmax - nstep) {
        // last sweep iteration without convergence: keep the final value
        std::cerr << "\n[ww] Max iterations exceeded\n";
        cpotentials[l][q][m][p] = pmpcvdw_rt;
        nterms4d[l][q][m][p] = n;
        cpotentials[m][p][l][q] = cpotentials[l][q][m][p];
        nterms4d[m][p][l][q] = nterms4d[l][q][m][p];
      }
    }
    pcomp = pmpcvdw_rt;
  }
}
// Visits every unordered combination of the L*Q grid particles exactly
// once, invoking the member function `func` on the corresponding
// (l, q, m, r) index quadruple in an OpenMP-parallel loop.
// @param L      number of volume sections
// @param Q      number of charge sections
// @param func   pointer to a member worker, e.g.
//               &Enhancement::mpcvdwpotential_contact
// @param cutoff forwarded unchanged to the worker
template <typename PotentialFunctor>
void iterate_symmetric(short L, short Q, PotentialFunctor func,
                       double cutoff) {
  // n different particles with r and q
  short n = L*Q;
  // particle number -> (l, q) index pair
  vp particle(n);
  for (short l = 0; l < L; ++l) {
    for (short q = 0; q < Q; ++q) {
      pp p = pp(l, q);
      short pnumber = particle_number(l, q, L);
      particle[pnumber] = p;
    }
  }
  // number of unique potentials to compute - non repeated combinations
  // of particles
  unsigned comb = n*(n+1)/2;
  // vector to store the combinations
  vp vps(comb);
  unsigned count = 0;
  for (short i = 0; i < n; ++i) {
    for (short k = i; k < n; ++k) {
      pp p = pp(i, k);
      // store the combination: particle number i, particle number k
      vps[count] = p;
      count++;
    }
  }
  BOOST_LOG_SEV(lg, info) << "\n total length " << vps.size();
  // BUGFIX: the previous version reset and incremented `count` inside
  // the parallel loop without synchronization (a data race); the value
  // was never read afterwards, so the counter is simply not touched here.
  #pragma omp parallel for
  for (unsigned k = 0; k < vps.size(); k++) {
    short i = vps[k].first;  // particle i
    short j = vps[k].second; // particle j
    // get indices (l,q), (m,r) for particles i and j
    short l = particle[i].first;
    short q = particle[i].second;
    short m = particle[j].first;
    short r = particle[j].second;
    // apply the worker to the index quadruple and cutoff
    (this->*func)(l, q, m, r, cutoff);
  }
}
inline
void compute_mpcvdwpotential_contact_sym(double cutoff) {
BOOST_LOG_SEV(lg, info) << "Computing MPC+vdW at contact symmetric...";
iterate_symmetric(rarray_size, qarray_size, &Enhancement::mpcvdwpotential_contact, cutoff);
}
inline
void compute_mpcvdwpotential_barrier_sym(double cutoff) {
BOOST_LOG_SEV(lg, info) << "Computing MPC+vdW barrier symmetric...";
// pass the reference of the method to iterate_symmetric
iterate_symmetric(rarray_size, qarray_size, &Enhancement::mpcvdwpotential_barrier, cutoff);
}
// Brute-force variant of the MPC+vdW contact computation: sweeps the
// full 4-d (l,q,m,p) grid (including symmetric duplicates) and, for
// each combination, grows the number of expansion terms n from nmin to
// nmax (step nstep) until two consecutive values agree within
// MPC_ERROR, storing the result in cpotentials and the term count in
// nterms4d.
inline
void compute_mpcvdwpotential_contact_bf(double cutoff) {
  BOOST_LOG_SEV(lg, info) << "Computing MPC+vdW at contact...";
  #pragma omp parallel for ordered collapse(4) schedule(auto)
  for (unsigned int l = 0; l < rarray_size; ++l) {
    // iterate in charges particle 1
    for (unsigned int q = 0; q < qarray_size; ++q) {
      // iterate in radii particle 2
      for (unsigned int m = 0; m < rarray_size; ++m) {
        // iterate in charges particle 2
        for (unsigned int p = 0; p < qarray_size; ++p) {
          double r1 = rarray[l];
          double q1 = qarray[q];
          double r2 = rarray[m];
          double q2 = qarray[p];
          // contact distance
          double rt = r1+r2;
          eint::potential_ipavdw_funct pipavdw(r1, q1, r2, q2, eps, AH,
                                               cutoff, 1.0, 1.0);
          // ipa + vdw at contact, used as convergence seed
          double pipavdw_rt = pipavdw(rt);
          double pcomp = pipavdw_rt;
          unsigned short initer = 0;
          for (unsigned short n = nmin; n <= nmax; n += nstep, ++initer) {
            // mpc functor
            eint::potential_mpcvdw_funct pmpcvdwfunct(r1, q1, r2, q2, eps, n,
                                                      AH, cutoff, 1.0/*mpc*/, 1.0/*vdw*/);
            // mpc at contact
            double pmpcvdw_rt = pmpcvdwfunct(rt);
            // relative difference against the previous iteration
            double error_comp = max_pct_error(pcomp, pmpcvdw_rt);
            // initer > 0 keeps the first MPC value (compared against
            // the IPA seed only) from triggering convergence
            if ((error_comp < MPC_ERROR) && (initer > 0)) {
              cpotentials[l][q][m][p] = pmpcvdw_rt;
              nterms4d[l][q][m][p] = n;
              // converged: setting n = nmax ends the sweep
              n = nmax;
            }
            else {
              if (n > nmax - nstep) {
                // last iteration without convergence: keep final value
                std::cerr << "\n[ww] Max iterations exceeded\n";
                cpotentials[l][q][m][p] = pmpcvdw_rt;
                nterms4d[l][q][m][p] = n;
              }
            }
            pcomp = pmpcvdw_rt;
          }
        }
      }
    }
  }
  BOOST_LOG_SEV(lg, info) << "Done computing MPC+vdW potential contact.";
}
// resize nterms4d grid for mpc
inline
void set_nterms(bgrid4d grid4) {
nterms4d.resize(grid4);
}
// Brute-force barrier search for the MPC+vdW interaction over the full
// 4-d (l,q,m,p) grid. A barrier exists when the force changes sign
// between contact (r1+r2) and 100x contact; its position is bracketed
// with TOMS-748 and the potential there goes to bpotentials, the
// position to rbarriers. Combinations without a sign change are left
// untouched. Uses the per-combination term counts fitted into nterms4d.
inline
void compute_mpcvdwpotential_barrier_bf(double cutoff) {
  BOOST_LOG_SEV(lg, info) << "Computing MPC+vdW barriers...";
  // (removed an unused timing variable)
  #pragma omp parallel for ordered collapse(4) schedule(auto)
  for (unsigned int l = 0; l < rarray_size; ++l) {
    // iterate in charges particle 1
    for (unsigned int q = 0; q < qarray_size; ++q) {
      // iterate in radii particle 2
      for (unsigned int m = 0; m < rarray_size; ++m) {
        // iterate in charges particle 2
        for (unsigned int p = 0; p < qarray_size; ++p) {
          double r1 = rarray[l];
          double q1 = qarray[q];
          double r2 = rarray[m];
          double q2 = qarray[p];
          double rmin = r1+r2;           // contact distance
          double rmax = 100.0*rmin;      // far-field bracket limit
          double min = rmin;
          double max = rmax;
          // multipolar terms previously fitted for this combination
          short n = nterms4d[l][q][m][p];
          eint::force_mpcvdw_funct fmpcvdwfunct(r1, q1, r2, q2, eps, n, AH, cutoff, 1.0/*mpc*/, 1.0/*vdw*/);
          // force mpc at contact
          double fmpcvdw_rmin = fmpcvdwfunct(rmin);
          // Force at r max
          double fmpcvdw_rmax = fmpcvdwfunct(rmax);
          // a sign change of the force marks a barrier in between
          if (fmpcvdw_rmin * fmpcvdw_rmax < 0.0) {
            boost::uintmax_t bmax_iter = ROOT_MAXITER;
            tools::eps_tolerance<double> tol = ROOT_TOL;
            std::pair<double, double> pair_fmpcvdw;
            pair_fmpcvdw = tools::toms748_solve(fmpcvdwfunct, min, max, fmpcvdw_rmin,
                                                fmpcvdw_rmax, tol, bmax_iter);
            // guard against a root search that failed to converge
            if (bmax_iter > 990) {
              std::cerr << "\n ERROR max iter " << bmax_iter << "\n\n";
              std::terminate();
            }
            // bracket midpoint approximates the barrier position
            double rbarrier = 0.5 * (pair_fmpcvdw.first + pair_fmpcvdw.second);
            if (rbarrier >= min) {
              // potential functor with the SAME coefficients as the force
              eint::potential_mpcvdw_funct pmpcvdwfunct(r1, q1, r2, q2, eps, n, AH, cutoff, 1.0/*mpc*/, 1.0/*vdw*/);
              // potential at the barrier position
              double pmpcvdw_rb = pmpcvdwfunct(rbarrier);
              bpotentials[l][q][m][p] = pmpcvdw_rb;
              rbarriers[l][q][m][p] = rbarrier;
            }
            else {
              std::cerr << "\n ERROR Negative rbarrier " << rbarrier << '\n';
              std::terminate();
            }
          }
        }
      }
    }
  }
  BOOST_LOG_SEV(lg, info) << "Done computing MPC+vdW potential barrier.";
}
// Barrier search for the IPA+vdW interaction over the full 4-d
// (l,q,m,p) grid. A barrier exists when the force changes sign between
// contact (r1+r2) and 100x contact; its position is bracketed with
// TOMS-748, the potential there goes to bpotentials and the position to
// rbarriers. Combinations without a sign change are left untouched.
// @param cutoff series cutoff forwarded to the functors
// @param vdw    van der Waals scaling prefactor (default 1.0)
inline
void compute_ipavdwpotential_barrier(double cutoff, double vdw=1.0) {
  BOOST_LOG_SEV(lg, info) << "Computing IPA+vdW barrier...";
  // (removed an unused timing variable)
  for (unsigned int l = 0; l < rarray_size; ++l) {
    // iterate in charges particle 1
    for (unsigned int q = 0; q < qarray_size; ++q) {
      // iterate in radii particle 2
      for (unsigned int m = 0; m < rarray_size; ++m) {
        // iterate in charges particle 2
        for (unsigned int p = 0; p < qarray_size; ++p) {
          double r1 = rarray[l];
          double q1 = qarray[q];
          double r2 = rarray[m];
          double q2 = qarray[p];
          double rmin = r1+r2;           // contact distance
          double rmax = 100.0*rmin;      // far-field bracket limit
          double min = rmin;
          double max = rmax;
          eint::force_ipavdw_funct fipavdw(r1, q1, r2, q2, eps, AH, cutoff, 1.0, vdw);
          // force ipa at contact
          double fipa_rmin = fipavdw(rmin);
          // Force at r max
          double fipa_rmax = fipavdw(rmax);
          // a sign change of the force marks a barrier in between
          if (fipa_rmin * fipa_rmax < 0.0) {
            boost::uintmax_t bmax_iter = ROOT_MAXITER;
            tools::eps_tolerance<double> tol = ROOT_TOL;
            std::pair<double, double> pair_fipa;
            pair_fipa = tools::toms748_solve(fipavdw, min, max, fipa_rmin,
                                             fipa_rmax, tol, bmax_iter);
            // guard against a root search that failed to converge
            if (bmax_iter > 990) {
              std::cerr << "\n ERROR max iter " << bmax_iter << "\n\n";
              std::terminate();
            }
            // bracket midpoint approximates the barrier position
            double rbarrier = 0.5 * (pair_fipa.first + pair_fipa.second);
            if (rbarrier >= min) {
              // potential functor with the SAME coefficients as the force
              eint::potential_ipavdw_funct pipavdw(r1, q1, r2, q2, eps, AH, cutoff, 1.0, 1.0);
              // potential at the barrier position
              double pipa_rbarrier = pipavdw(rbarrier);
              bpotentials[l][q][m][p] = pipa_rbarrier;
              rbarriers[l][q][m][p] = rbarrier;
            }
            else {
              std::cerr << "\n ERROR Negative rbarrier " << rbarrier << '\n';
              std::terminate();
            }
          }
        }
      }
    }
  }
  BOOST_LOG_SEV(lg, info) << "Done computing IPA+vdW potential barrier.";
}
// Combines the contact potentials (phimin) and barrier potentials
// (phimax) into the enhancement-factor grid efactor for every (l,q,m,p)
// combination: with a barrier, eta_barrier(phimin, phimax) is used
// (falling back to the repulsive/attractive forms if it comes out more
// negative than ETA_TOL); without a barrier, eta_repulsive for
// phimin > 0 and eta_attractive otherwise. Always returns 0.
// NOTE(review): the critical section spans the whole loop body, so this
// parallel for effectively runs serially; kept as-is to preserve
// behavior.
inline
int compute_bruteforce() {
  auto start = std::chrono::system_clock::now();
  #pragma omp parallel for collapse(4) ordered
  for (unsigned int l = 0; l < rarray_size; ++l) {
    // iterate in charges particle 1
    for (unsigned int q = 0; q < qarray_size; ++q) {
      // iterate in radii particle 2
      for (unsigned int m = 0; m < rarray_size; ++m) {
        // iterate in charges particle 2
        for (unsigned int p = 0; p < qarray_size; ++p) {
          #pragma omp critical
          {
            double phimin = cpotentials[l][q][m][p];
            double phimax = bpotentials[l][q][m][p];
            // checks if a barrier exists over potential_threshold
            // FIXME change to 0
            // if (fabs(phimax) > potential_threshold) {
            if (fabs(phimax) != 0.0) {
              double eta = eta_barrier(phimin, phimax);
              // In the hybrid approach eta can be negative
              // in the case of phimin > phimax
              if (eta < 0.0) {
                if (fabs(eta) > ETA_TOL) {
                  BOOST_LOG_SEV(lg, warning) << "Negative enhancement factor : "
                                             << eta;
                  BOOST_LOG_SEV(lg, warning) << "phimin : " << phimin;
                  BOOST_LOG_SEV(lg, warning) << "phimax : " << phimax;
                  // fall back to the barrier-free expressions
                  if (phimin > 0.0) {
                    eta = eta_repulsive(phimin);
                    BOOST_LOG_SEV(lg, warning) << "choosing repulsive phimin, eta: "
                                               << eta;
                  }
                  else {
                    eta = eta_attractive(phimin);
                    BOOST_LOG_SEV(lg, warning) << "choosing attractive phimax, eta: "
                                               << eta;
                  }
                }
                else {
                  BOOST_LOG_SEV(lg, warning) << "eta is negative but less than tolerance";
                  eta = 0.0;
                }
              }
              efactor[l][q][m][p] = eta;
            }
            else {
              // no barrier: pick the repulsive or attractive expression
              // from the sign of the contact potential
              if (phimin > 0.0) {
                double eta = eta_repulsive(phimin);
                if (fabs(eta) < ETA_TOL) {
                  BOOST_LOG_SEV(lg, warning) << "eta is less than tolerance";
                  eta = 0.0;
                }
                efactor[l][q][m][p] = eta;
              }
              else {
                double eta = eta_attractive(phimin);
                if (fabs(eta) < ETA_TOL) {
                  BOOST_LOG_SEV(lg, warning) << "eta is less than tolerance";
                  eta = 0.0;
                }
                efactor[l][q][m][p] = eta;
              }
            }
          }
        }
      }
    }
  }
  auto end = std::chrono::system_clock::now();
  // BUGFIX: was `auto elapsed_seconds = end-start;`, whose count() logs
  // raw clock ticks; duration<double> yields seconds, matching the
  // other timing logs in this class.
  std::chrono::duration<double> elapsed_seconds = end - start;
  BOOST_LOG_SEV(lg, info) << "Elapsed time : " << elapsed_seconds.count();
  return 0;
}
// Evaluates the plain Coulomb potential at contact for every reduced
// pair and stores it (with n = 0 expansion terms) in the matching
// contact_potentials slot. rt = 1 + r21 is the contact distance in
// units of r1, since r21 = r2/r1. The barrier arrays are emptied.
inline void compute_coulombpotential_contact() {
  BOOST_LOG_SEV(lg, info)
      << "Computing Coulomb potential at contact for n pairs : "
      << reduced_pairs.size();
  #pragma omp parallel for ordered // schedule(nonmonotonic:dynamic)
  for (unsigned int i = 0; i < reduced_pairs.size(); ++i) {
    unsigned long id = reduced_pairs[i].id_;
    double r21 = reduced_pairs[i].r21_;
    double q21 = reduced_pairs[i].q21_;
    // contact distance in units of r1
    double rt = 1.0 + r21;
    eint::potential_coulomb_funct pcoulfunct(r21, q21);
    // coulomb at contact
    double pcoul_rt = pcoulfunct(rt);
    // serialize the write into the shared vector
    #pragma omp critical
    { fill_contact_potential(contact_potentials[i], id, pcoul_rt, 0); }
  }
  barrier_potentials.resize(0);
  rbarrier_array.resize(0);
  BOOST_LOG_SEV(lg, info) << "Done computing Coulomb potential at contact.";
}
// Fills cpotentials with the Coulomb+vdW interaction potential
// evaluated at contact (r = r1 + r2) for every (l,q,m,p) grid
// combination.
// @param cutoff series cutoff forwarded to the potential functor
inline void compute_coulombvdwpotential_contact(double cutoff) {
  // BUGFIX: the old message dangled ("... for n pairs : " with nothing
  // appended); removed unused pair counters and timing variable too.
  BOOST_LOG_SEV(lg, info)
      << "Computing Coulomb+vdW potential at contact...";
  for (unsigned int l = 0; l < rarray_size; ++l) {
    // iterate in charges particle 1
    for (unsigned int q = 0; q < qarray_size; ++q) {
      // iterate in radii particle 2
      for (unsigned int m = 0; m < rarray_size; ++m) {
        // iterate in charges particle 2
        for (unsigned int p = 0; p < qarray_size; ++p) {
          double r1 = rarray[l];
          double q1 = qarray[q];
          double r2 = rarray[m];
          double q2 = qarray[p];
          double rt = r1+r2;  // contact distance
          eint::potential_coulombvdw_funct pcoulvdw(r1, q1, r2, q2, eps, AH, cutoff, 1.0, 1.0);
          cpotentials[l][q][m][p] = pcoulvdw(rt);
        }
      }
    }
  }
  // BUGFIX: message previously said "IPA+vdW" (copy-paste from the IPA
  // routine).
  BOOST_LOG_SEV(lg, info) << "Done computing Coulomb+vdW potential contact.";
}
// Barrier search for the Coulomb+vdW interaction over the full 4-d
// (l,q,m,p) grid. A barrier exists when the force changes sign between
// contact (r1+r2) and 100x contact; its position is bracketed with
// TOMS-748, the potential there goes to bpotentials and the position to
// rbarriers. Combinations without a sign change are left untouched.
inline
void compute_coulombvdwpotential_barrier(double cutoff) {
  BOOST_LOG_SEV(lg, info) << "Computing Coulomb+vdW barrier...";
  // (removed an unused timing variable)
  for (unsigned int l = 0; l < rarray_size; ++l) {
    // iterate in charges particle 1
    for (unsigned int q = 0; q < qarray_size; ++q) {
      // iterate in radii particle 2
      for (unsigned int m = 0; m < rarray_size; ++m) {
        // iterate in charges particle 2
        for (unsigned int p = 0; p < qarray_size; ++p) {
          double r1 = rarray[l];
          double q1 = qarray[q];
          double r2 = rarray[m];
          double q2 = qarray[p];
          double rmin = r1+r2;           // contact distance
          double rmax = 100.0*rmin;      // far-field bracket limit
          double min = rmin;
          double max = rmax;
          eint::force_coulombvdw_funct fcoulombvdw(r1, q1, r2, q2, eps, AH, cutoff, 1.0, 1.0);
          // force coulomb at contact
          double fcoulomb_rmin = fcoulombvdw(rmin);
          // Force at r max
          double fcoulomb_rmax = fcoulombvdw(rmax);
          // a sign change of the force marks a barrier in between
          if (fcoulomb_rmin * fcoulomb_rmax < 0.0) {
            boost::uintmax_t bmax_iter = ROOT_MAXITER;
            tools::eps_tolerance<double> tol = ROOT_TOL;
            std::pair<double, double> pair_fcoulomb;
            pair_fcoulomb = tools::toms748_solve(fcoulombvdw, min, max, fcoulomb_rmin,
                                                 fcoulomb_rmax, tol, bmax_iter);
            // guard against a root search that failed to converge
            if (bmax_iter > 990) {
              std::cerr << "\n ERROR max iter " << bmax_iter << "\n\n";
              std::terminate();
            }
            // bracket midpoint approximates the barrier position
            double rbarrier = 0.5 * (pair_fcoulomb.first + pair_fcoulomb.second);
            if (rbarrier >= min) {
              // potential functor with the SAME coefficients as the force
              eint::potential_coulombvdw_funct pcoulombvdw(r1, q1, r2, q2, eps, AH, cutoff, 1.0, 1.0);
              // potential at the barrier position
              double pcoulomb_rbarrier = pcoulombvdw(rbarrier);
              bpotentials[l][q][m][p] = pcoulomb_rbarrier;
              rbarriers[l][q][m][p] = rbarrier;
            }
            else {
              std::cerr << "\n ERROR Negative rbarrier " << rbarrier << '\n';
              std::terminate();
            }
          }
        }
      }
    }
  }
  // BUGFIX: message previously said "IPA+vdW" (copy-paste from the IPA
  // routine).
  BOOST_LOG_SEV(lg, info) << "Done computing Coulomb+vdW potential barrier.";
}
// Evaluates the IPA (image potential approximation) pair potential at contact
// (reduced separation 1 + r21) for each non-repeated pair and stores it in
// contact_potentials (same ordering as reduced_pairs). Also pre-reserves the
// barrier containers for the subsequent barrier pass.
inline void compute_ipapotential_contact() {
  BOOST_LOG_SEV(lg, info)
      << "Computing ipa potential at contact for n pairs : "
      << reduced_pairs.size();
#pragma omp parallel for ordered // schedule(nonmonotonic:dynamic)
  for (unsigned int i = 0; i < reduced_pairs.size(); ++i) {
    // id of particle pair (not repeated)
    unsigned long id = reduced_pairs[i].id_;
    double r21 = reduced_pairs[i].r21_;
    double q21 = reduced_pairs[i].q21_;
    // contact separation in reduced units
    double rt = 1.0 + r21;
    eint::potential_ipa_funct pipafunct(r21, q21, eps);
    // // ipa at contact
    double pipa_rt = pipafunct(rt);
    // #pragma omp ordered// critical
    // {
    // contact_potentials has the same index of reduced pairs
    // and same id of particle pairs
    // (each iteration writes a distinct element, so no synchronization
    // is required here)
    fill_contact_potential(contact_potentials[i], id, pipa_rt, 0);
  }
  barrier_potentials.reserve(contact_potentials.size());
  rbarrier_array.reserve(contact_potentials.size());
  BOOST_LOG_SEV(lg, info) << "Done computing ipa potential at contact.";
}
// Locates the potential barrier of the IPA pair interaction for each reduced
// (non-repeated) pair. A barrier exists when the radial force changes sign
// between contact (rmin = 1 + r21) and the far field (rmax = 100*rmin); the
// root of the force is bracketed with TOMS-748 and the potential there is
// appended to barrier_potentials (with the location in rbarrier_array),
// indexed by the reduced-pair index.
//
// BUGFIX: previously, when toms748_solve threw, the caught exception was
// only logged and execution fell through to use the value-initialized
// pair_fipa = (0, 0), yielding rbarrier = 0 < rmin and a spurious
// std::terminate() in the "Negative rbarrier" branch. The pair is now
// skipped, matching the "[ee] No barrier" message.
inline void compute_ipapotential_barrier() {
  BOOST_LOG_SEV(lg, info) << "Computing ipa potential barrier for n pairs : "
                          << reduced_pairs.size();
#pragma omp \
    parallel for // shared(reduced_pairs) schedule(nonmonotonic:dynamic)
  for (unsigned int i = 0; i < contact_potentials.size(); ++i) {
    unsigned int index = i; // contact_potentials[i].id_;
    double r21 = reduced_pairs[index].r21_;
    double q21 = reduced_pairs[index].q21_;
    double rmin = 1.0 + r21;    // contact separation (reduced units)
    double rmax = 100.0 * rmin; // far-field end of the bracket
    eint::force_ipa_funct fipafunct(r21, q21, eps);
    // force ipa at contact
    double fipa_rmin = fipafunct(rmin);
    // Force at r max
    double fipa_rmax = fipafunct(rmax);
    // A force sign change inside the bracket implies a potential barrier.
    if (fipa_rmin * fipa_rmax < 0.0) {
      boost::uintmax_t bmax_iter = ROOT_MAXITER;
      tools::eps_tolerance<double> tol = ROOT_TOL;
      std::pair<double, double> pair_fipa;
      try {
        pair_fipa = tools::toms748_solve(fipafunct, rmin, rmax, fipa_rmin,
                                         fipa_rmax, tol, bmax_iter);
      } catch (const std::exception &exc) {
        std::cerr << '\n' << exc.what() << '\n';
        std::cerr << "\n[ee] No barrier\n";
        // pair_fipa is not valid after a failed solve; skip this pair.
        continue;
      }
      // abort if the root finder exhausted its iteration budget
      if (bmax_iter > 990) {
        std::cerr << "\n ERROR max iter " << bmax_iter << "\n\n";
        std::terminate();
      }
      // midpoint of the final bracket is the barrier location
      double rbarrier = 0.5 * (pair_fipa.first + pair_fipa.second);
      if (rbarrier >= rmin) {
        //*********************** USE SAME COEFFICIENTS OF FORCE
        // ipa functor
        eint::potential_ipa_funct pipafunct(r21, q21, eps);
        // ipa at contact
        double pipa_rbarrier = pipafunct(rbarrier);
        // the barrier has the index of the id of reduced pairs;
        // serialize the concurrent appends
#pragma omp critical
        {
          ContactPotential barrierpot(index, pipa_rbarrier);
          barrier_potentials.push_back(barrierpot);
          rbarrier_array.push_back(rbarrier);
        }
      } else {
        // a barrier inside the contact radius is unphysical
        std::cerr << "\n ERROR Negative rbarrier " << rbarrier << '\n';
        std::terminate();
      }
    }
  }
  // Trim the excess capacity left over from reserve() (shrink-to-fit idiom).
  std::vector<ContactPotential>(barrier_potentials).swap(barrier_potentials);
  std::vector<double>(rbarrier_array).swap(rbarrier_array);
  BOOST_LOG_SEV(lg, info)
      << "Barrier potentials size : " << barrier_potentials.size();
  BOOST_LOG_SEV(lg, info) << "Done computing ipa potential barrier.";
}
// Evaluates the MPC (multipolar coefficient) pair potential at contact for
// each non-repeated pair, increasing the number of expansion terms n from
// nmin to nmax (step nstep) until two consecutive evaluations agree within
// MPC_ERROR. The converged value (and the n used) is stored in
// contact_potentials[i]; the IPA contact potential seeds the comparison.
//
// BUGFIX: the max-iterations fallback previously passed the loop index `i`
// as the pair id to fill_contact_potential, inconsistent with the converged
// branch which passes the reduced pair id `id`. Both paths now use `id`.
inline void compute_mpcpotential_contact() {
  BOOST_LOG_SEV(lg, info)
      << "Computing mpc potential at contact for n pairs : "
      << reduced_pairs.size();
  // WARNING FIXME
  // std::ofstream outfile("pot1.dat");
  // outfile << "#n\tr21\tq21\tmpc\tpipa\terror_pot\terror\tmsg\n";
#pragma omp parallel for // schedule(nonmonotonic:dynamic)
  for (unsigned int i = 0; i < reduced_pairs.size(); ++i) {
    unsigned long id = reduced_pairs[i].id_;
    double r21 = reduced_pairs[i].r21_;
    double q21 = reduced_pairs[i].q21_;
    double rt = 1.0 + r21; // contact separation (reduced units)
    eint::potential_ipa_funct pipafunct(r21, q21, eps);
    // ipa at contact seeds the convergence comparison
    double pipa_rt = pipafunct(rt);
    double pcomp = pipa_rt;
    unsigned int initer = 0;
    for (short n = nmin; n <= nmax; n += nstep, ++initer) {
      // mpc functor with n expansion terms
      eint::potential_mpc_funct pmpcfunct(r21, q21, eps, n);
      // mpc at contact
      double pmpc_rt = pmpcfunct(rt);
      double error_comp = max_pct_error(pcomp, pmpc_rt);
      // converged once two consecutive n agree (skip the first iteration,
      // which compares against the IPA seed)
      if ((error_comp < MPC_ERROR) && (initer > 0)) {
        // each iteration writes a distinct element; no synchronization needed
        fill_contact_potential(contact_potentials[i], id, pmpc_rt, n);
        n = nmax; // terminate the n loop
      } else {
        if (n > nmax - nstep) {
          // last iteration without convergence: keep the best value anyway
          std::cerr << "\n[ww] Max iterations exceeded\n";
          fill_contact_potential(contact_potentials[i], id, pmpc_rt, n);
        }
      }
      pcomp = pmpc_rt;
    }
  }
  // outfile.close();
  barrier_potentials.reserve(contact_potentials.size());
  rbarrier_array.reserve(contact_potentials.size());
  BOOST_LOG_SEV(lg, info)
      << "Error potentials size : " << error_potentials.size();
  BOOST_LOG_SEV(lg, info) << "Done computing MPC potential at contact.";
}
/// Enhancement factor for a monotonically attractive pair potential
/// (phimin at contact): eta = 1 - phimin / (kB * T).
inline double eta_attractive(double phimin) {
  return 1.0 - phimin / (Kboltz * temperature);
}
/// Enhancement factor for a monotonically repulsive pair potential:
/// Boltzmann suppression eta = exp(-phimin / (kB * T)).
inline double eta_repulsive(double phimin) {
  return exp(-(phimin / (Kboltz * temperature)));
}
/// Enhancement factor for a pair potential exhibiting a barrier:
/// eta = exp(-phimax / kT) * (1 + (phimax - phimin) / kT).
inline double eta_barrier(double phimin, double phimax) {
  const double kt = Kboltz * temperature;
  const double dphi = phimax - phimin;
  return exp(-phimax / kt) * (1.0 + dphi / kt);
}
// Locates the potential barrier of the MPC pair interaction for each entry
// of contact_potentials, reusing the number of expansion terms n_ selected
// during the contact pass. The force root is bracketed in
// [1 + r21, 100*(1 + r21)] with TOMS-748; the MPC potential at the root is
// appended to barrier_potentials and the location to rbarrier_array.
inline void compute_mpcpotential_barrier() {
  BOOST_LOG_SEV(lg, info) << "Computing mpc potential barrier for n pairs : "
                          << contact_potentials.size();
#pragma omp \
    parallel for // shared(reduced_pairs) schedule(nonmonotonic:dynamic)
  for (unsigned int i = 0; i < contact_potentials.size(); ++i) {
    unsigned int index = i; // contact_potentials[i].id_;
    // number of multipolar terms chosen during the contact computation
    unsigned int nterms = contact_potentials[i].n_;
    double r21 = reduced_pairs[index].r21_;
    double q21 = reduced_pairs[index].q21_;
    // bracket for the root search: contact to 100x contact (reduced units)
    double rmin = 1.0 + r21;
    double rmax = 100.0 * rmin;
    double min = rmin;
    double max = rmax;
    eint::force_mpc_funct fmpcfunct(r21, q21, eps, nterms);
    // force mpc at contact
    double fmpc_rmin = fmpcfunct(rmin);
    // Force at r max
    double fmpc_rmax = fmpcfunct(rmax);
    // checks if minimum exists
    // (a force sign change inside the bracket implies a barrier)
    if (fmpc_rmin * fmpc_rmax < 0.0) {
      // std::cerr << "\n[ii] Mixed phi_rt = " << fmpc_rmin << '\t' <<
      // fmpc_rmax;
      boost::uintmax_t bmax_iter = ROOT_MAXITER;
      tools::eps_tolerance<double> tol = ROOT_TOL;
      std::pair<double, double> pair_fmpc;
      // NOTE(review): unlike the IPA variant, a throwing toms748_solve here
      // is NOT caught and will propagate out of the parallel region.
      // try {
      pair_fmpc = tools::toms748_solve(fmpcfunct, min, max, fmpc_rmin,
                                       fmpc_rmax, tol, bmax_iter);
      // pair_fmpc = tools::bisect(fmpcfunct, min, max, tol, bmax_iter);
      // }
      // catch(const std::exception& exc) {
      // std::cerr << '\n' << exc.what() << '\n';
      // std::terminate();
      // }
      // abort if the root finder exhausted its iteration budget
      if (bmax_iter > 990) {
        std::cerr << "\n ERROR max iter " << bmax_iter << "\n\n";
        std::terminate();
      }
      // midpoint of the final bracket is the barrier location
      double rbarrier = 0.5 * (pair_fmpc.first + pair_fmpc.second);
      if (rbarrier >= min) {
        //*********************** USE SAME COEFFICIENTS OF FORCE
        // serialize the concurrent appends to the shared vectors
#pragma omp critical
        {
          // mpc functor
          eint::potential_mpc_funct pmpcfunct(r21, q21, eps, nterms);
          // mpc at contact
          double pmpc_rbarrier = pmpcfunct(rbarrier);
          ContactPotential barrierpot(index, pmpc_rbarrier, nterms);
          barrier_potentials.push_back(barrierpot);
          rbarrier_array.push_back(rbarrier);
        }
      } else {
        // a barrier inside the contact radius is unphysical
        std::cerr << "\n ERROR Negative rbarrier " << rbarrier << '\n';
        std::terminate();
      }
    }
  }
  // shrink-to-fit: trim excess capacity left over from reserve()
  std::vector<ContactPotential> tmp = barrier_potentials;
  barrier_potentials.swap(tmp);
  std::vector<ContactPotential>().swap(tmp);
  std::vector<double> tmp2 = rbarrier_array;
  rbarrier_array.swap(tmp2);
  std::vector<double>().swap(tmp2);
  BOOST_LOG_SEV(lg, info)
      << "Barrier potentials size : " << barrier_potentials.size();
  BOOST_LOG_SEV(lg, info) << "Done computing mpc potential barrier.";
}
// Expands the per-reduced-pair barrier results onto the full 4D
// (r1, q1, r2, q2) grids: for every repeated combination of a reduced pair
// that has a barrier, writes the dimensional barrier potential into
// bpotentials and the dimensional barrier location into rbarriers (both
// symmetric orderings). Unlike compute_enhancement_factor, this variant
// deliberately does NOT write the contact potentials (see HACK note below).
inline
void expand_barriers() {
  auto start = std::chrono::system_clock::now();
  BOOST_LOG_SEV(lg, info) << "Size for array efactor : " << efactor.size();
  BOOST_LOG_SEV(lg, info) << "Enhancement: expanding pair potentials";
  // iterate in reduced_pairs
#pragma omp parallel for
  for (unsigned int irp = 0; irp < reduced_pairs.size(); ++irp) {
    // this is the index of reduced pairs, i.e., non repeated pair
    // NOTE(review): `index` is unused in this function.
    unsigned int index = reduced_pairs[irp].id_;
    // contact potential for this reduced pairhas the same ordering
    // of reduced pairs
    double contact_potential = contact_potentials[irp].potential_;
    // get repeated combinations (of particle_pairs)
    std::vector<long> reps = reduced_pairs[irp].repetitions_;
    // find if index is in barrier_potentials, i.e., if a potential
    // barrier exists
    ContactPotentialIterator barrier_it = std::find_if(
        barrier_potentials.begin(), barrier_potentials.end(), find_id(irp));
    // iterate in vector of repeated combinations
    for (unsigned int jrep = 0; jrep < reps.size(); ++jrep) {
      long rep_index = reps[jrep];
      short l = particle_pairs[rep_index].l_;
      short q = particle_pairs[rep_index].q_;
      short m = particle_pairs[rep_index].m_;
      short p = particle_pairs[rep_index].p_;
      double tr1 = rarray[l]; //(!notswapd ? rarray[l] : rarray[m]);
      // std::cerr << "\nn q is " << q;
      double tq1 = qarray[q]; //(!notswapd ? qarray[q] : qarray[p]);
      // dimensional prefactor q1^2 / r1 restores units from the reduced
      // potential
      double potprefactor = tq1 * tq1 / tr1;
      // NOTE(review): phimin is computed but intentionally unused here
      // (see HACK comment below).
      double phimin = potprefactor * contact_potential;
      // std::cerr << "\n\n";
      // WARNING HACK do nothing for contact potentials
      //cpotentials[m][p][l][q] = phimin;
      //cpotentials[l][q][m][p] = phimin;
      // we have potential barrier
      // ** if barrier compute eta
      if (barrier_it != barrier_potentials.end()) {
        double phimax = potprefactor * (*barrier_it).potential_;
        // update potentials
        bpotentials[m][p][l][q] = phimax;
        bpotentials[l][q][m][p] = phimax;
        unsigned int idr = barrier_it - barrier_potentials.begin();
        // rescale the reduced barrier location to dimensional units
        rbarriers[m][p][l][q] = rbarrier_array[idr] * tr1;
        rbarriers[l][q][m][p] = rbarrier_array[idr] * tr1;
      }
      // std::cout << std::endl << l << '\t' << q << '\t' << m << '\t' << p <<
      // '\t' << efactor[l][q][m][p];
    }
  }
  auto end = std::chrono::system_clock::now();
  // NOTE(review): elapsed_seconds is computed but never logged here.
  std::chrono::duration<double> elapsed_seconds = end - start;
  BOOST_LOG_SEV(lg, info) << "Done expanding pair potentials";
}
// Computes the coagulation enhancement factor eta for every 4D grid
// combination. Three phases:
//  1) expand the per-reduced-pair contact/barrier results onto the full
//     (r1, q1, r2, q2) grids (cpotentials / bpotentials / rbarriers),
//  2) evaluate eta per cell: eta_barrier when a barrier exists, otherwise
//     eta_repulsive / eta_attractive depending on the sign of phimin,
//  3) set eta = 1 for neutral pairs (no electrostatic interaction).
inline void compute_enhancement_factor() {
  // for (unsigned int l=0; l<rarray_size; ++l) {
  // // iterate in charges particle 1
  // for (unsigned int q=0; q<qarray_size; ++q) {
  // // iterate in radii particle 2
  // for (unsigned int m=0; m<rarray_size; ++m) {
  // // iterate in charges particle 2
  // for (unsigned int p=0; p<qarray_size; ++p) {
  // efactor[l][q][m][p] = 0.0;
  // cpotentials[l][q][m][p] = 0.0;
  // bpotentials[l][q][m][p] = 0.0;
  // }
  // }
  // }
  // }
  auto start = std::chrono::system_clock::now();
  BOOST_LOG_SEV(lg, info) << "Size for array efactor : " << efactor.size();
  BOOST_LOG_SEV(lg, info) << "Enhancement: expanding pair potentials";
  // ---- Phase 1: expand reduced-pair results onto the full grids ----
  // iterate in reduced_pairs
#pragma omp parallel for
  for (unsigned int irp = 0; irp < reduced_pairs.size(); ++irp) {
    // this is the index of reduced pairs, i.e., non repeated pair
    // NOTE(review): `index` is unused in this loop.
    unsigned int index = reduced_pairs[irp].id_;
    // contact potential for this reduced pairhas the same ordering
    // of reduced pairs
    double contact_potential = contact_potentials[irp].potential_;
    // get repeated combinations (of particle_pairs)
    std::vector<long> reps = reduced_pairs[irp].repetitions_;
    // find if index is in barrier_potentials, i.e., if a potential
    // barrier exists
    ContactPotentialIterator barrier_it = std::find_if(
        barrier_potentials.begin(), barrier_potentials.end(), find_id(irp));
    // iterate in vector of repeated combinations
    for (unsigned int jrep = 0; jrep < reps.size(); ++jrep) {
      long rep_index = reps[jrep];
      short l = particle_pairs[rep_index].l_;
      short q = particle_pairs[rep_index].q_;
      short m = particle_pairs[rep_index].m_;
      short p = particle_pairs[rep_index].p_;
      double tr1 = rarray[l]; //(!notswapd ? rarray[l] : rarray[m]);
      // std::cerr << "\nn q is " << q;
      double tq1 = qarray[q]; //(!notswapd ? qarray[q] : qarray[p]);
      // dimensional prefactor q1^2 / r1 restores units from the reduced
      // potential
      double potprefactor = tq1 * tq1 / tr1;
      double phimin = potprefactor * contact_potential;
      // std::cerr << "\n\n";
      // write both symmetric orderings of the pair
      cpotentials[m][p][l][q] = phimin;
      cpotentials[l][q][m][p] = phimin;
      // we have potential barrier
      // ** if barrier compute eta
      if (barrier_it != barrier_potentials.end()) {
        double phimax = potprefactor * (*barrier_it).potential_;
        // update potentials
        bpotentials[m][p][l][q] = phimax;
        bpotentials[l][q][m][p] = phimax;
        unsigned int idr = barrier_it - barrier_potentials.begin();
        // rescale the reduced barrier location to dimensional units
        rbarriers[m][p][l][q] = rbarrier_array[idr] * tr1;
        rbarriers[l][q][m][p] = rbarrier_array[idr] * tr1;
      }
      // std::cout << std::endl << l << '\t' << q << '\t' << m << '\t' << p <<
      // '\t' << efactor[l][q][m][p];
    }
  }
  auto end = std::chrono::system_clock::now();
  std::chrono::duration<double> elapsed_seconds = end - start;
  BOOST_LOG_SEV(lg, info) << "Done expanding pair potentials";
  BOOST_LOG_SEV(lg, info) << "Elapsed time : " << elapsed_seconds.count();
  BOOST_LOG_SEV(lg, info) << "Computing Enhancement Factor";
  start = std::chrono::system_clock::now();
  // ---- Phase 2: per-cell enhancement factor ----
#pragma omp parallel for
  for (unsigned int l = 0; l < rarray_size; ++l) {
    // iterate in charges particle 1
    for (unsigned int q = 0; q < qarray_size; ++q) {
      // iterate in radii particle 2
      for (unsigned int m = 0; m < rarray_size; ++m) {
        // iterate in charges particle 2
        for (unsigned int p = 0; p < qarray_size; ++p) {
          double phimin = cpotentials[l][q][m][p];
          double phimax = bpotentials[l][q][m][p];
          // checks if a barrier exists over potential_threshold
          // FIXME change to 0
          // if (fabs(phimax) > potential_threshold) {
          if (fabs(phimax) != 0.0) {
            // double phimax = bpotentials[l][q][m][p];
            double eta = eta_barrier(phimin, phimax);
            // In the hybrid approach eta can be negative
            // in the case of phimin > phimax
            if (eta < 0.0) {
              BOOST_LOG_SEV(lg, warning) << "Negative enhancement factor";
              BOOST_LOG_SEV(lg, warning) << "phimin : " << phimin;
              BOOST_LOG_SEV(lg, warning) << "phimax : " << phimax;
              // fall back to the monotonic formulas
              if (phimin > 0.0) {
                eta = eta_repulsive(phimin);
              } else {
                eta = eta_attractive(phimin);
              }
            }
            //#pragma omp atomic write
            efactor[l][q][m][p] = eta;
          } else {
            // no barrier: monotonic potential, sign of phimin decides
            if (phimin > 0.0) {
              double eta = eta_repulsive(phimin);
              //#pragma omp atomic write
              efactor[l][q][m][p] = eta;
            } else {
              double eta = eta_attractive(phimin);
              //#pragma omp atomic write
              efactor[l][q][m][p] = eta;
            }
          }
        }
      }
    }
  }
  end = std::chrono::system_clock::now();
  elapsed_seconds = end - start;
  BOOST_LOG_SEV(lg, info) << "Done computing enhancement factor...";
  BOOST_LOG_SEV(lg, info) << "Elapsed time : " << elapsed_seconds.count();
  BOOST_LOG_SEV(lg, info)
      << "Computing neutral pairs : " << neutral_pairs.size();
  start = std::chrono::system_clock::now();
  // ---- Phase 3: neutral pairs have no enhancement (eta = 1) ----
  for (unsigned int i = 0; i < neutral_pairs.size(); ++i) {
    short l = neutral_pairs[i].l_;
    short q = neutral_pairs[i].q_;
    short m = neutral_pairs[i].m_;
    short p = neutral_pairs[i].p_;
    efactor[l][q][m][p] = 1.0;
    efactor[m][p][l][q] = 1.0;
  }
  end = std::chrono::system_clock::now();
  elapsed_seconds = end - start;
  BOOST_LOG_SEV(lg, info) << "Done computing neutral pairs...";
  BOOST_LOG_SEV(lg, info) << "Elapsed time : " << elapsed_seconds.count();
}
//////////////////////
// Variant of compute_enhancement_factor that adds the van der Waals
// contribution (eint::potential_vdw, evaluated at contact with the given
// cutoff) to phimin before computing eta. The neutral-pair pass is
// deliberately commented out: with vdW every pair interacts, so neutral
// pairs are not forced to eta = 1 here.
inline void compute_vdwenhancement_factor(double cutoff) {
  auto start = std::chrono::system_clock::now();
  BOOST_LOG_SEV(lg, info) << "Size for array efactor : " << efactor.size();
  BOOST_LOG_SEV(lg, info) << "Enhancement: expanding pair potentials";
  // ---- Phase 1: expand reduced-pair results onto the full grids ----
  // iterate in reduced_pairs
#pragma omp parallel for
  for (unsigned int irp = 0; irp < reduced_pairs.size(); ++irp) {
    // this is the index of reduced pairs, i.e., non repeated pair
    // NOTE(review): `index` is unused in this loop.
    unsigned int index = reduced_pairs[irp].id_;
    // contact potential for this reduced pairhas the same ordering
    // of reduced pairs
    double contact_potential = contact_potentials[irp].potential_;
    // get repeated combinations (of particle_pairs)
    std::vector<long> reps = reduced_pairs[irp].repetitions_;
    // find if index is in barrier_potentials, i.e., if a potential
    // barrier exists
    ContactPotentialIterator barrier_it = std::find_if(
        barrier_potentials.begin(), barrier_potentials.end(), find_id(irp));
    // iterate in vector of repeated combinations
    for (unsigned int jrep = 0; jrep < reps.size(); ++jrep) {
      long rep_index = reps[jrep];
      short l = particle_pairs[rep_index].l_;
      short q = particle_pairs[rep_index].q_;
      short m = particle_pairs[rep_index].m_;
      short p = particle_pairs[rep_index].p_;
      double tr1 = rarray[l]; //(!notswapd ? rarray[l] : rarray[m]);
      // std::cerr << "\nn q is " << q;
      double tq1 = qarray[q]; //(!notswapd ? qarray[q] : qarray[p]);
      // dimensional prefactor q1^2 / r1 restores units from the reduced
      // potential
      double potprefactor = tq1 * tq1 / tr1;
      double phimin = potprefactor * contact_potential;
      // std::cerr << "\n\n";
      // write both symmetric orderings of the pair
      cpotentials[m][p][l][q] = phimin;
      cpotentials[l][q][m][p] = phimin;
      // we have potential barrier
      // ** if barrier compute eta
      if (barrier_it != barrier_potentials.end()) {
        double phimax = potprefactor * (*barrier_it).potential_;
        // update potentials
        bpotentials[m][p][l][q] = phimax;
        bpotentials[l][q][m][p] = phimax;
        unsigned int idr = barrier_it - barrier_potentials.begin();
        // rescale the reduced barrier location to dimensional units
        rbarriers[m][p][l][q] = rbarrier_array[idr] * tr1;
        rbarriers[l][q][m][p] = rbarrier_array[idr] * tr1;
      }
      // std::cout << std::endl << l << '\t' << q << '\t' << m << '\t' << p <<
      // '\t' << efactor[l][q][m][p];
    }
  }
  auto end = std::chrono::system_clock::now();
  std::chrono::duration<double> elapsed_seconds = end - start;
  BOOST_LOG_SEV(lg, info) << "Done expanding pair potentials";
  BOOST_LOG_SEV(lg, info) << "Elapsed time : " << elapsed_seconds.count();
  BOOST_LOG_SEV(lg, info) << "Computing Enhancement Factor";
  start = std::chrono::system_clock::now();
  // ---- Phase 2: per-cell enhancement factor, vdW added to phimin ----
#pragma omp parallel for
  for (unsigned int l = 0; l < rarray_size; ++l) {
    // iterate in charges particle 1
    for (unsigned int q = 0; q < qarray_size; ++q) {
      // iterate in radii particle 2
      for (unsigned int m = 0; m < rarray_size; ++m) {
        // iterate in charges particle 2
        for (unsigned int p = 0; p < qarray_size; ++p) {
          // electrostatic contact potential plus vdW at contact (r1 + r2)
          double phimin = cpotentials[l][q][m][p]
              + eint::potential_vdw(rarray[l]+rarray[m],
                                    rarray[l],
                                    rarray[m],
                                    cutoff);
          double phimax = bpotentials[l][q][m][p];
          // checks if a barrier exists over potential_threshold
          // FIXME change to 0
          // if (fabs(phimax) > potential_threshold) {
          if (fabs(phimax) != 0.0) {
            // double phimax = bpotentials[l][q][m][p];
            double eta = eta_barrier(phimin, phimax);
            // In the hybrid approach eta can be negative
            // in the case of phimin > phimax
            if (eta < 0.0) {
              BOOST_LOG_SEV(lg, warning) << "Negative enhancement factor";
              BOOST_LOG_SEV(lg, warning) << "phimin : " << phimin;
              BOOST_LOG_SEV(lg, warning) << "phimax : " << phimax;
              // fall back to the monotonic formulas
              if (phimin > 0.0) {
                eta = eta_repulsive(phimin);
              } else {
                eta = eta_attractive(phimin);
              }
            }
            //#pragma omp atomic write
            efactor[l][q][m][p] = eta;
          }
          else {
            // no barrier: monotonic potential, sign of phimin decides
            if (phimin > 0.0) {
              double eta = eta_repulsive(phimin);
              //#pragma omp atomic write
              efactor[l][q][m][p] = eta;
            } else {
              double eta = eta_attractive(phimin);
              //#pragma omp atomic write
              efactor[l][q][m][p] = eta;
            }
          }
        }
      }
    }
  }
  end = std::chrono::system_clock::now();
  elapsed_seconds = end - start;
  BOOST_LOG_SEV(lg, info) << "Done computing enhancement factor...";
  BOOST_LOG_SEV(lg, info) << "Elapsed time : " << elapsed_seconds.count();
  BOOST_LOG_SEV(lg, info)
      << "Computing neutral pairs : " << neutral_pairs.size();
  start = std::chrono::system_clock::now();
  // Neutral-pair override intentionally disabled in the vdW variant:
  // for (unsigned int i = 0; i < neutral_pairs.size(); ++i) {
  // short l = neutral_pairs[i].l_;
  // short q = neutral_pairs[i].q_;
  // short m = neutral_pairs[i].m_;
  // short p = neutral_pairs[i].p_;
  // efactor[l][q][m][p] = 1.0;
  // efactor[m][p][l][q] = 1.0;
  // }
  end = std::chrono::system_clock::now();
  elapsed_seconds = end - start;
  BOOST_LOG_SEV(lg, info) << "Done computing neutral pairs...";
  BOOST_LOG_SEV(lg, info) << "Elapsed time : " << elapsed_seconds.count();
}
//////////////////////
// Sparse variant of compute_enhancement_factor: instead of filling the dense
// 4D grids, it builds per-pair element lists (PairElement vectors) and then
// flattens them into index arrays + value arrays for output. Phases:
//  1) expand reduced pairs into cpotentials_pe (no barrier) or
//     bcpotentials_pe / bpotentials_pe / rbarriers_pe (barrier),
//  2) compute eta per element (monotonic and barrier cases) into efactor_pe,
//  3) append eta = 1 entries for neutral pairs,
//  4) flatten efactor_pe / cpotentials_pe / barrier lists into
//     (efindices, daefactor), (cpindices, dacpotentials) and
//     (bpindices, darbarriers, dabpotentials, dabcpotentials).
inline void compute_enhancementfactor_frompairs() {
  auto start = std::chrono::system_clock::now();
  BOOST_LOG_SEV(lg, info) << "Size for array efactor : " << efactor.size();
  BOOST_LOG_SEV(lg, info) << "Enhancement: expanding pair potentials";
  // ---- Phase 1: expand reduced pairs into sparse element lists ----
  // iterate in reduced_pairs
#pragma omp parallel for
  for (unsigned int irp = 0; irp < reduced_pairs.size(); ++irp) {
    // this is the index of reduced pairs, i.e., non repeated pair
    // NOTE(review): `index` is unused in this loop.
    unsigned int index = reduced_pairs[irp].id_;
    // contact potential for this reduced pairhas the same ordering
    // of reduced pairs
    double contact_potential = contact_potentials[irp].potential_;
    // get repeated combinations (of particle_pairs)
    std::vector<long> reps = reduced_pairs[irp].repetitions_;
    // find if index is in barrier_potentials, i.e., if a potential
    // barrier exists
    ContactPotentialIterator barrier_it = std::find_if(
        barrier_potentials.begin(), barrier_potentials.end(), find_id(irp));
    // iterate in vector of repeated combinations
    for (unsigned int jrep = 0; jrep < reps.size(); ++jrep) {
      long rep_index = reps[jrep];
      short l = particle_pairs[rep_index].l_;
      short q = particle_pairs[rep_index].q_;
      short m = particle_pairs[rep_index].m_;
      short p = particle_pairs[rep_index].p_;
      double tr1 = rarray[l]; //(!notswapd ? rarray[l] : rarray[m]);
      // std::cerr << "\nn q is " << q;
      double tq1 = qarray[q]; //(!notswapd ? qarray[q] : qarray[p]);
      // dimensional prefactor q1^2 / r1 restores units from the reduced
      // potential
      double potprefactor = tq1 * tq1 / tr1;
      double phimin = potprefactor * contact_potential;
      // std::cerr << "\n\n";
      // we have potential barrier
      // ** if barrier compute eta
      if (barrier_it != barrier_potentials.end()) {
        // serialize appends; both symmetric orderings are stored
#pragma omp critical
        {
          // potential at contact
          PairElement bcpe1(m, p, l, q, phimin);
          bcpotentials_pe.push_back(bcpe1);
          PairElement bcpe2(l, q, m, p, phimin);
          bcpotentials_pe.push_back(bcpe2);
          double phimax = potprefactor * (*barrier_it).potential_;
          PairElement bpe1(m, p, l, q, phimax);
          bpotentials_pe.push_back(bpe1);
          PairElement bpe2(l, q, m, p, phimax);
          bpotentials_pe.push_back(bpe2);
          unsigned int idr = barrier_it - barrier_potentials.begin();
          // rescale the reduced barrier location to dimensional units
          double rbb = rbarrier_array[idr] * tr1;
          PairElement rpe1(m, p, l, q, rbb);
          rbarriers_pe.push_back(rpe1);
          PairElement rpe2(l, q, m, p, rbb);
          rbarriers_pe.push_back(rpe2);
        }
      } else { // we dont have a barrier
#pragma omp critical
        {
          PairElement cpe1(m, p, l, q, phimin);
          cpotentials_pe.push_back(cpe1);
          PairElement cpe2(l, q, m, p, phimin);
          cpotentials_pe.push_back(cpe2);
        }
      }
      // std::cout << std::endl << l << '\t' << q << '\t' << m << '\t' << p <<
      // '\t' << efactor[l][q][m][p];
    }
  }
  auto end = std::chrono::system_clock::now();
  std::chrono::duration<double> elapsed_seconds = end - start;
  BOOST_LOG_SEV(lg, info) << "Done expanding pair potentials";
  BOOST_LOG_SEV(lg, info) << "Elapsed time : " << elapsed_seconds.count();
  BOOST_LOG_SEV(lg, info) << "Computing Enhancement Factor";
  start = std::chrono::system_clock::now();
  // ---- Phase 2a: eta for monotonic (no-barrier) elements ----
  // compute enhancement factor monotonic potential
#pragma omp parallel for // ordered
  for (unsigned int imon = 0; imon < cpotentials_pe.size(); ++imon) {
    short l, q, m, p;
    double phimin;
    get_pe(cpotentials_pe[imon], l, q, m, p, phimin);
    if (phimin > 0.0) {
#pragma omp critical
      {
        double eta = eta_repulsive(phimin);
        PairElement eta_e(l, q, m, p, eta);
        efactor_pe.push_back(eta_e);
      }
    } else {
#pragma omp critical
      {
        double eta = eta_attractive(phimin);
        PairElement eta_e(l, q, m, p, eta);
        efactor_pe.push_back(eta_e);
      }
    }
  }
  // ---- Phase 2b: eta for barrier elements ----
  // compute enhancement factor barrier potential
  // NOTE(review): bcpotentials_pe / bpotentials_pe / rbarriers_pe are assumed
  // to be index-aligned (they are filled together above).
#pragma omp parallel for // ordered
  for (unsigned int ibar = 0; ibar < rbarriers_pe.size(); ++ibar) {
    short l, q, m, p;
    short lx, qx, mx, px;
    double phimin, phimax;
    get_pe(bcpotentials_pe[ibar], l, q, m, p, phimin);
    get_pe(bpotentials_pe[ibar], lx, qx, mx, px, phimax);
    // checks if a barrier exists over potential_threshold
    // FIXME change to 0
    // if (fabs(phimax) > potential_threshold) {
    if (fabs(phimax) != 0.0) {
      // double phimax = bpotentials[l][q][m][p];
      double eta = eta_barrier(phimin, phimax);
      // In the hybrid approach eta can be negative
      // in the case of phimin > phimax
      if (eta < 0.0) {
        BOOST_LOG_SEV(lg, warning) << "Negative enhancement factor";
        BOOST_LOG_SEV(lg, warning) << "phimin : " << phimin;
        BOOST_LOG_SEV(lg, warning) << "phimax : " << phimax;
        // fall back to the monotonic formulas
        if (phimin > 0.0) {
          eta = eta_repulsive(phimin);
        } else {
          eta = eta_attractive(phimin);
        }
      }
      //#pragma omp atomic write
#pragma omp critical
      {
        PairElement eta_e(l, q, m, p, eta);
        efactor_pe.push_back(eta_e);
      }
    }
  }
  end = std::chrono::system_clock::now();
  elapsed_seconds = end - start;
  BOOST_LOG_SEV(lg, info) << "Done computing enhancement factor...";
  BOOST_LOG_SEV(lg, info) << "Elapsed time : " << elapsed_seconds.count();
  BOOST_LOG_SEV(lg, info)
      << "Computing neutral pairs : " << neutral_pairs.size();
  start = std::chrono::system_clock::now();
  // ---- Phase 3: eta = 1 for neutral pairs (both orderings) ----
  for (unsigned int i = 0; i < neutral_pairs.size(); ++i) {
    short l = neutral_pairs[i].l_;
    short q = neutral_pairs[i].q_;
    short m = neutral_pairs[i].m_;
    short p = neutral_pairs[i].p_;
    PairElement eta_1(l, q, m, p, 1.0);
    efactor_pe.push_back(eta_1);
    PairElement eta_2(m, p, l, q, 1.0);
    efactor_pe.push_back(eta_2);
  }
  end = std::chrono::system_clock::now();
  elapsed_seconds = end - start;
  BOOST_LOG_SEV(lg, info) << "Done computing neutral pairs...";
  BOOST_LOG_SEV(lg, info) << "Elapsed time : " << elapsed_seconds.count();
  BOOST_LOG_SEV(lg, info)
      << "Enhancement factor to arrays : " << efactor_pe.size();
  start = std::chrono::system_clock::now();
  // ---- Phase 4: flatten element lists into index/value arrays ----
  bsgrid2d = {{static_cast<long int>(efactor_pe.size()), 4}};
  efindices.resize(bsgrid2d);
  daefactor.resize(efactor_pe.size());
  //#pragma omp parallel for ordered
  for (unsigned int ieta = 0; ieta < efactor_pe.size(); ++ieta) {
    short l, q, m, p;
    double eta;
    get_pe(efactor_pe[ieta], l, q, m, p, eta);
    efindices[ieta][0] = l;
    efindices[ieta][1] = q;
    efindices[ieta][2] = m;
    efindices[ieta][3] = p;
    daefactor[ieta] = eta;
    // std::cerr << "\n" << l << '\t' << q << '\t' << m << '\t' << p <<
    // '\t' << eta;
  }
  // Contact monotonic potentials
  bsgrid2d = {{static_cast<long int>(cpotentials_pe.size()), 4}};
  cpindices.resize(bsgrid2d);
  dacpotentials.resize(cpotentials_pe.size());
  for (unsigned int icp = 0; icp < cpotentials_pe.size(); ++icp) {
    short l, q, m, p;
    double cpot;
    get_pe(cpotentials_pe[icp], l, q, m, p, cpot);
    cpindices[icp][0] = l;
    cpindices[icp][1] = q;
    cpindices[icp][2] = m;
    cpindices[icp][3] = p;
    dacpotentials[icp] = cpot;
    // std::cerr << "\n" << l << '\t' << q << '\t' << m << '\t' << p <<
    // '\t' << eta;
  }
  // Barrier potentials
  bsgrid2d = {{static_cast<long int>(rbarriers_pe.size()), 4}};
  bpindices.resize(bsgrid2d);
  darbarriers.resize(rbarriers_pe.size());
  dabpotentials.resize(rbarriers_pe.size());
  dabcpotentials.resize(rbarriers_pe.size());
  for (unsigned int ibar = 0; ibar < rbarriers_pe.size(); ++ibar) {
    short l, q, m, p;
    double rbar;
    get_pe(rbarriers_pe[ibar], l, q, m, p, rbar);
    bpindices[ibar][0] = l;
    bpindices[ibar][1] = q;
    bpindices[ibar][2] = m;
    bpindices[ibar][3] = p;
    darbarriers[ibar] = rbar;
    dabpotentials[ibar] = bpotentials_pe[ibar].value_;
    dabcpotentials[ibar] = bcpotentials_pe[ibar].value_;
    // std::cerr << "\n" << l << '\t' << q << '\t' << m << '\t' << p <<
    // '\t' << eta;
  }
  end = std::chrono::system_clock::now();
  elapsed_seconds = end - start;
  BOOST_LOG_SEV(lg, info) << "Done enhancement factor to arrays...";
  BOOST_LOG_SEV(lg, info) << "Elapsed time : " << elapsed_seconds.count();
}
/// Copies the flattened enhancement-factor table into caller-provided
/// arrays: values into daefactor_ and the (l, q, m, p) index quadruplets
/// into efindices_. Both outputs are resized to match.
inline void get_efindices(darray &daefactor_,
                          boost_short_array2d &efindices_) {
  // number of flattened entries
  const long int n_rows = static_cast<long int>(efindices.shape()[0]);
  // copy the eta values
  daefactor_.resize(n_rows);
  std::copy(std::begin(daefactor), std::end(daefactor),
            std::begin(daefactor_));
  // copy the index quadruplets
  bsgrid2d = {{n_rows, 4}};
  efindices_.resize(bsgrid2d);
  std::copy(efindices.begin(), efindices.end(), efindices_.begin());
}
/// Copies the flattened monotonic contact-potential table into
/// caller-provided arrays: values into dacpotentials_ and (l, q, m, p)
/// index quadruplets into cpindices_. Both outputs are resized to match.
inline void get_cpindices(darray &dacpotentials_,
                          boost_short_array2d &cpindices_) {
  // number of flattened entries
  const long int n_rows = static_cast<long int>(cpindices.shape()[0]);
  // copy the potential values
  dacpotentials_.resize(n_rows);
  std::copy(std::begin(dacpotentials), std::end(dacpotentials),
            std::begin(dacpotentials_));
  // copy the index quadruplets
  bsgrid2d = {{n_rows, 4}};
  cpindices_.resize(bsgrid2d);
  std::copy(cpindices.begin(), cpindices.end(), cpindices_.begin());
}
/// Copies the flattened barrier tables into caller-provided arrays:
/// contact potentials (dabcpotentials_), barrier potentials
/// (dabpotentials_), barrier locations (darbarriers_) and the shared
/// (l, q, m, p) index quadruplets (bpindices_). All outputs are resized
/// to the same row count.
inline void get_bpindices(darray &dabcpotentials_, darray &dabpotentials_,
                          darray &darbarriers_,
                          boost_short_array2d &bpindices_) {
  // number of flattened entries
  const long int n_rows = static_cast<long int>(bpindices.shape()[0]);
  // copy the three value arrays
  dabcpotentials_.resize(n_rows);
  std::copy(std::begin(dabcpotentials), std::end(dabcpotentials),
            std::begin(dabcpotentials_));
  dabpotentials_.resize(n_rows);
  std::copy(std::begin(dabpotentials), std::end(dabpotentials),
            std::begin(dabpotentials_));
  darbarriers_.resize(n_rows);
  std::copy(std::begin(darbarriers), std::end(darbarriers),
            std::begin(darbarriers_));
  // copy the index quadruplets
  bsgrid2d = {{n_rows, 4}};
  bpindices_.resize(bsgrid2d);
  std::copy(bpindices.begin(), bpindices.end(), bpindices_.begin());
}
//////////////////////
darray rarray;
darray qarray;
// no ref
boost_array4d_ref efactor;
boost_array4d_ref cpotentials;
boost_array4d_ref bpotentials;
boost_array4d_ref rbarriers;
boost_array4d dummy;
boost_short_array4d nterms4d;
darray daefactor;
darray dacpotentials;
darray dabpotentials;
darray dabcpotentials;
darray darbarriers;
double eps;
double AH;
src::severity_logger<severity_level> lg;
unsigned short rarray_size;
unsigned short qarray_size;
unsigned long ncombs;
short nmin;
short nmax;
short nstep;
std::vector<ParticlePair> particle_pairs;
std::vector<ParticlePair> neutral_pairs;
std::vector<ReducedParticlePair> reduced_pairs;
std::vector<ContactPotential> contact_potentials;
std::vector<ContactPotential> error_potentials;
std::vector<ContactPotential> barrier_potentials;
std::vector<PairElement> efactor_pe;
std::vector<PairElement> cpotentials_pe; //!< contact potential no barrier
std::vector<PairElement>
bcpotentials_pe; //!< contact potential when barrier exists
std::vector<PairElement> bpotentials_pe; //!< barrier potential
std::vector<PairElement> rbarriers_pe; //!< barrier location
boost_short_array2d efindices; //!< Array for enhancement factor indices.
boost_short_array2d cpindices; //!< Array for contact potential indices.
boost_short_array2d
bpindices; //!< Array for rbarrier and barrier potential indices.
bshortgrid2d bsgrid2d; //!< Grid for indices
double potential_threshold;
double temperature = 300.0;
std::vector<double> rbarrier_array;
};
} // namespace enhancement
#endif // ENHANCEMENT_H
|
MiniBow.h | /**
* Original File: TemplatedVocabulary.h
* Original Author: Dorian Galvez-Lopez
*
* Modified by: Darius Rückert
* Modifications:
* - Moved everything into this single header file
* - Removed support for non-ORB feature descriptors
* - Optimized loading, saving, matching
* - Removed dependency to opencv
*
* Original License: BSD-like
* https://github.com/dorian3d/DBoW2/blob/master/LICENSE.txt
* License of modifications: MIT
* https://github.com/darglein/DBoW2/blob/master/LICENSE.txt
*
*/
#pragma once
#include <algorithm>
#include <array>
#include <cassert>
#include <climits>
#include <cmath>
#include <cstdint>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <limits>
#include <map>
#include <numeric>
#include <stdexcept>
#include <string>
#include <tuple>
#include <vector>
namespace MiniBow
{
/// Id of words (indices of the leaves of the vocabulary tree)
typedef unsigned int WordId;
/// Value (weight) of a word in a bag-of-words vector
typedef double WordValue;
/// Id of nodes in the vocabulary tree
typedef unsigned int NodeId;
/// L-norms for normalization
enum LNorm
{
    L1,
    L2
};
/// Weighting type applied to word frequencies
enum WeightingType
{
    TF_IDF,
    TF,
    IDF,
    BINARY
};
/// Descriptor helper functions for 256-bit binary (ORB) descriptors.
class FORB
{
public:
    /// A 256-bit binary descriptor packed into four 64-bit words.
    using TDescriptor = std::array<uint64_t, 4>;
    typedef const TDescriptor* pDescriptor;
    /// Descriptor length in bytes (256 bits).
    static const int L = 32;
    /**
     * Calculates the bitwise-majority mean of a set of binary descriptors:
     * bit i of the result is set iff it is set in at least half of the
     * input descriptors.
     * @param descriptors input set; mean is left untouched when empty
     * @param mean (out) mean descriptor
     */
    static void meanValue(const std::vector<pDescriptor>& descriptors, TDescriptor& mean)
    {
        if (descriptors.empty())
        {
            return;
        }
        else if (descriptors.size() == 1)
        {
            mean = *descriptors[0];
        }
        else
        {
            // sum[i] = number of descriptors having bit i set (MSB-first per byte)
            std::vector<int> sum(FORB::L * 8, 0);
            for (size_t i = 0; i < descriptors.size(); ++i)
            {
                const auto& d = *descriptors[i];
                const unsigned char* p = (const unsigned char*)d.data();
                for (int j = 0; j < 32; ++j, ++p)
                {
                    if (*p & (1 << 7)) ++sum[j * 8];
                    if (*p & (1 << 6)) ++sum[j * 8 + 1];
                    if (*p & (1 << 5)) ++sum[j * 8 + 2];
                    if (*p & (1 << 4)) ++sum[j * 8 + 3];
                    if (*p & (1 << 3)) ++sum[j * 8 + 4];
                    if (*p & (1 << 2)) ++sum[j * 8 + 5];
                    if (*p & (1 << 1)) ++sum[j * 8 + 6];
                    if (*p & (1)) ++sum[j * 8 + 7];
                }
            }
            std::fill(mean.begin(), mean.end(), 0);
            unsigned char* p = (unsigned char*)mean.data();
            // majority threshold: ceil(N / 2)
            const int N2 = (int)descriptors.size() / 2 + descriptors.size() % 2;
            for (size_t i = 0; i < sum.size(); ++i)
            {
                if (sum[i] >= N2)
                {
                    // set bit
                    *p |= 1 << (7 - (i % 8));
                }
                if (i % 8 == 7) ++p;
            }
        }
    }
    /// Counts the set bits of a 64-bit word.
#if !defined(WIN32) && EIGEN_ARCH_i386_OR_x86_64
    static inline int popcnt64(uint64_t x)
    {
        __asm__("popcnt %1, %0" : "=r"(x) : "0"(x));
        return x;
    }
#else
    // Portable bit-twiddling fallback (parallel Hamming weight).
    static inline int popcnt64(uint64_t v)
    {
        v = v - ((v >> 1) & (uint64_t) ~(uint64_t)0 / 3);
        v = (v & (uint64_t) ~(uint64_t)0 / 15 * 3) + ((v >> 2) & (uint64_t) ~(uint64_t)0 / 15 * 3);
        v = (v + (v >> 4)) & (uint64_t) ~(uint64_t)0 / 255 * 15;
        return (uint64_t)(v * ((uint64_t) ~(uint64_t)0 / 255)) >> (sizeof(uint64_t) - 1) * CHAR_BIT;
    }
#endif
    /**
     * Calculates the Hamming distance between two descriptors.
     * @param a
     * @param b
     * @return number of differing bits (0..256)
     */
    static double distance(const TDescriptor& a, const TDescriptor& b)
    {
        // data() already yields const uint64_t*; the previous C-style casts
        // silently stripped const, which this version avoids.
        const uint64_t* pa = a.data();
        const uint64_t* pb = b.data();
        int dist = 0;
        for (int i = 0; i < 4; i++, pa++, pb++)
        {
            dist += popcnt64(*pa ^ *pb);
        }
        return dist;
    }
};
/// Vector of words to represent images
class BowVector : public std::map<WordId, WordValue>
{
public:
/**
* Adds a value to a word value existing in the vector, or creates a new
* word with the given value
* @param id word id to look for
* @param v value to create the word with, or to add to existing word
*/
void addWeight(WordId id, WordValue v)
{
BowVector::iterator vit = this->lower_bound(id);
if (vit != this->end() && !(this->key_comp()(id, vit->first)))
{
vit->second += v;
}
else
{
this->insert(vit, BowVector::value_type(id, v));
}
}
/**
* Adds a word with a value to the vector only if this does not exist yet
* @param id word id to look for
* @param v value to give to the word if this does not exist
*/
void addIfNotExist(WordId id, WordValue v)
{
BowVector::iterator vit = this->lower_bound(id);
if (vit == this->end() || (this->key_comp()(id, vit->first)))
{
this->insert(vit, BowVector::value_type(id, v));
}
}
/**
* L1-Normalizes the values in the vector
* @param norm_type norm used
*/
void normalize()
{
double norm = 0.0;
BowVector::iterator it;
{
for (it = begin(); it != end(); ++it) norm += std::abs(it->second);
}
if (norm > 0.0)
{
for (it = begin(); it != end(); ++it) it->second /= norm;
}
}
};
class FeatureVector : public std::map<NodeId, std::vector<int>>
{
public:
void addFeature(NodeId id, int i_feature)
{
FeatureVector::iterator vit = this->lower_bound(id);
if (vit != this->end() && vit->first == id)
{
vit->second.push_back(i_feature);
}
else
{
vit = this->insert(vit, FeatureVector::value_type(id, std::vector<int>()));
vit->second.push_back(i_feature);
}
}
};
/// Scoring policy based on the L1 distance between two BoW vectors.
class L1Scoring
{
public:
    // static constexpr inline int id = 0;
    enum
    {
        id = 0
    };
    /// Vectors must be L1-normalized before being scored with this policy.
    static constexpr bool mustNormalize = true;
    /**
     * Scores two (L1-normalized) bag-of-words vectors.
     * Accumulates |vi - wi| - |vi| - |wi| over the words common to both
     * vectors and returns -sum/2, which for normalized vectors lies in
     * [0..1] (1 = identical).
     * @param v1
     * @param v2
     * @return similarity score in [0..1]
     */
    static double score(const BowVector& v1, const BowVector& v2)
    {
        BowVector::const_iterator v1_it, v2_it;
        const BowVector::const_iterator v1_end = v1.end();
        const BowVector::const_iterator v2_end = v2.end();
        v1_it = v1.begin();
        v2_it = v2.begin();
        double score = 0;
        while (v1_it != v1_end && v2_it != v2_end)
        {
            const WordValue& vi = v1_it->second;
            const WordValue& wi = v2_it->second;
            if (v1_it->first == v2_it->first)
            {
                // common word: contributes |vi-wi| - |vi| - |wi|
                score += std::abs(vi - wi) - std::abs(vi) - std::abs(wi);
                ++v1_it;
                ++v2_it;
            }
            else if (v1_it->first < v2_it->first)
            {
                // skip words of v1 that are not in v2
                v1_it = v1.lower_bound(v2_it->first);
            }
            else
            {
                // skip words of v2 that are not in v1
                v2_it = v2.lower_bound(v1_it->first);
            }
        }
        score = -score / 2.0;
        return score; // [0..1]
    }
};
/**
 * Generic hierarchical bag-of-words vocabulary: a k-ary tree of depth L
 * whose leaves are the "words", built by hierarchical k-means.
 * @param TDescriptor class of descriptor
 * @param F class of descriptor functions (meanValue / distance)
 * @param Scoring scoring policy used to compare two bag-of-words vectors
 */
template <class TDescriptor, class F, class Scoring>
class TemplatedVocabulary
{
public:
    /**
     * Initiates an empty vocabulary
     * @param k branching factor
     * @param L depth levels
     * @param weighting weighting type
     */
    TemplatedVocabulary(int k = 10, int L = 5, WeightingType weighting = TF_IDF);
    /**
     * Creates the vocabulary by loading a file (see loadRaw)
     * @param filename
     */
    TemplatedVocabulary(const std::string& filename);
    /**
     * Destructor
     */
    virtual ~TemplatedVocabulary();
    /**
     * Creates a vocabulary from the training features with the already
     * defined parameters
     * @param training_features one vector of descriptors per training image
     */
    virtual void create(const std::vector<std::vector<TDescriptor>>& training_features);
    /**
     * Creates a vocabulary from the training features, setting the branching
     * factor and the depth levels of the tree
     * @param training_features
     * @param k branching factor
     * @param L depth levels
     */
    virtual void create(const std::vector<std::vector<TDescriptor>>& training_features, int k, int L);
    /**
     * Creates a vocabulary from the training features, setting the branching
     * factor and the depth levels of the tree, and the weighting scheme
     */
    virtual void create(const std::vector<std::vector<TDescriptor>>& training_features, int k, int L,
                        WeightingType weighting);
    /**
     * Returns the number of words in the vocabulary
     * @return number of words
     */
    virtual inline unsigned int size() const;
    /**
     * Returns whether the vocabulary is empty (i.e. it has not been trained)
     * @return true iff the vocabulary is empty
     */
    virtual inline bool empty() const;
    /**
     * Transforms a set of descriptors into a bow vector
     * @param features
     * @param v (out) bow vector of weighted words
     */
    virtual void transform(const std::vector<TDescriptor>& features, BowVector& v) const;
    /**
     * Transform a set of descriptors into a bow vector and a feature vector
     * @param features
     * @param v (out) bow vector
     * @param fv (out) feature vector of nodes and feature indexes
     * @param levelsup levels to go up the vocabulary tree to get the node index
     */
    virtual void transform(const std::vector<TDescriptor>& features, BowVector& v, FeatureVector& fv,
                           int levelsup) const;
    /**
     * OpenMP variant of the transform above. Writes into the shared members
     * N / transformedFeatures, so it is non-const and a vocabulary instance
     * must not run two of these calls concurrently.
     */
    virtual void transformOMP(const std::vector<TDescriptor>& features, BowVector& v, FeatureVector& fv, int levelsup);
    // shared OMP variables: scratch state used by transformOMP
    using TransformResult = std::tuple<WordId, NodeId, WordValue>;
    int N;                                             // feature count of the current transformOMP call
    std::vector<TransformResult> transformedFeatures;  // per-feature (word, node, weight) results
    /**
     * Transforms a single feature into a word (without weight)
     * @param feature
     * @return word id
     */
    virtual WordId transform(const TDescriptor& feature) const;
    /**
     * Returns the score of two vectors
     * @param a vector
     * @param b vector
     * @return score between vectors
     * @note the vectors must be already sorted and normalized if necessary
     */
    inline double score(const BowVector& a, const BowVector& b) const;
    /**
     * Returns the id of the node that is "levelsup" levels from the word given
     * @param wid word id
     * @param levelsup 0..L
     * @return node id. if levelsup is 0, returns the node id associated to the
     * word id
     */
    virtual NodeId getParentNode(WordId wid, int levelsup) const;
    /**
     * Returns the ids of all the words that are under the given node id,
     * by traversing any of the branches that goes down from the node
     * @param nid starting node id
     * @param words ids of words
     */
    void getWordsFromNode(NodeId nid, std::vector<WordId>& words) const;
    /**
     * Returns the branching factor of the tree (k)
     * @return k
     */
    inline int getBranchingFactor() const { return m_k; }
    /**
     * Returns the depth levels of the tree (L)
     * @return L
     */
    inline int getDepthLevels() const { return m_L; }
    /**
     * Returns the real depth levels of the tree on average
     * @return average of depth levels of leaves
     */
    float getEffectiveLevels() const;
    /**
     * Returns the descriptor of a word
     * @param wid word id
     * @return descriptor
     */
    virtual inline TDescriptor getWord(WordId wid) const;
    /**
     * Returns the weight of a word
     * @param wid word id
     * @return weight
     */
    virtual inline WordValue getWordWeight(WordId wid) const;
    /**
     * Returns the weighting method
     * @return weighting method
     */
    inline WeightingType getWeightingType() const { return m_weighting; }
    /**
     * Changes the weighting method
     * @param type new weighting type
     */
    inline void setWeightingType(WeightingType type);
    /// Saves the vocabulary to a file (counterpart of loadRaw).
    virtual void saveRaw(const std::string& file) const;
    /// Loads a vocabulary previously written with saveRaw.
    virtual void loadRaw(const std::string& file);
    /**
     * Stops those words whose weight is below minWeight.
     * Words are stopped by setting their weight to 0. They are not returned
     * later when transforming image features into vectors.
     * Note that when using IDF or TF_IDF, the weight is the idf part, which
     * is equivalent to -log(f), where f is the frequency of the word
     * (f = Ni/N, Ni: number of training images where the word is present,
     * N: number of training images).
     * Note that the old weight is forgotten, and subsequent calls to this
     * function with a lower minWeight have no effect.
     * @return number of words stopped now
     */
    virtual int stopWords(double minWeight);

protected:
    /// Pointer to descriptor
    typedef const TDescriptor* pDescriptor;
    /// Tree node
    struct Node
    {
        /// Node id
        NodeId id;
        /// Weight if the node is a word
        WordValue weight;
        /// Children
        std::vector<NodeId> children;
        /// Parent node (undefined in case of root)
        NodeId parent;
        /// Node descriptor
        TDescriptor descriptor;
        /// Word id if the node is a word
        WordId word_id;
        /**
         * Empty constructor
         */
        Node() : id(0), weight(0), parent(0), word_id(0) {}
        /**
         * Constructor
         * @param _id node id
         */
        Node(NodeId _id) : id(_id), weight(0), parent(0), word_id(0) {}
        /**
         * Returns whether the node is a leaf node
         * @return true iff the node is a leaf
         */
        inline bool isLeaf() const { return children.empty(); }
    };

protected:
    /**
     * Returns a set of pointers to descriptors
     * @param training_features all the features
     * @param features (out) pointers to the training features
     */
    void getFeatures(const std::vector<std::vector<TDescriptor>>& training_features,
                     std::vector<pDescriptor>& features) const;
    /**
     * Returns the word id associated to a feature
     * @param feature
     * @param id (out) word id
     * @param weight (out) word weight
     * @param nid (out) if given, id of the node "levelsup" levels up
     * @param levelsup
     */
    virtual void transform(const TDescriptor& feature, WordId& id, WordValue& weight, NodeId* nid = NULL,
                           int levelsup = 0) const;
    /**
     * Returns the word id associated to a feature
     * @param feature
     * @param id (out) word id
     */
    virtual void transform(const TDescriptor& feature, WordId& id) const;
    /**
     * Creates a level in the tree, under the parent, by running kmeans with
     * a descriptor set, and recursively creates the subsequent levels too
     * @param parent_id id of parent node
     * @param descriptors descriptors to run the kmeans on
     * @param current_level current level in the tree
     */
    void HKmeansStep(NodeId parent_id, const std::vector<pDescriptor>& descriptors, int current_level);
    /**
     * Creates k clusters from the given descriptors with some seeding algorithm.
     * @note In this class, kmeans++ is used, but this function should be
     * overridden by inherited classes.
     */
    virtual void initiateClusters(const std::vector<pDescriptor>& descriptors,
                                  std::vector<TDescriptor>& clusters) const;
    /**
     * Creates k clusters from the given descriptor sets by running the
     * initial step of kmeans++
     * @param descriptors
     * @param clusters resulting clusters
     */
    void initiateClustersKMpp(const std::vector<pDescriptor>& descriptors, std::vector<TDescriptor>& clusters) const;
    /**
     * Create the words of the vocabulary once the tree has been built
     */
    void createWords();
    /**
     * Sets the weights of the nodes of tree according to the given features.
     * Before calling this function, the nodes and the words must be already
     * created (by calling HKmeansStep and createWords)
     * @param features
     */
    void setNodeWeights(const std::vector<std::vector<TDescriptor>>& features);
    /**
     * Returns a random number in the range [min..max]
     * @param min
     * @param max
     * @return random T number in [min..max]
     */
    template <class T>
    static T RandomValue(T min, T max)
    {
        return ((T)rand() / (T)RAND_MAX) * (max - min) + min;
    }
    /**
     * Returns a random int in the range [min..max]
     * @param min
     * @param max
     * @return random int in [min..max]
     */
    static int RandomInt(int min, int max)
    {
        int d = max - min + 1;
        return int(((double)rand() / ((double)RAND_MAX + 1.0)) * d) + min;
    }

protected:
    /// Branching factor
    int m_k;
    /// Depth levels
    int m_L;
    /// Weighting method
    WeightingType m_weighting;
    /// Tree nodes
    std::vector<Node> m_nodes;
    /// Words of the vocabulary (tree leaves)
    /// this condition holds: m_words[wid]->word_id == wid
    std::vector<Node*> m_words;
};
// --------------------------------------------------------------------------
template <class TDescriptor, class F, class Scoring>
TemplatedVocabulary<TDescriptor, F, Scoring>::TemplatedVocabulary(int k, int L, WeightingType weighting)
: m_k(k), m_L(L), m_weighting(weighting)
{
}
// --------------------------------------------------------------------------
/// Builds a vocabulary by loading a previously saved raw file.
template <class TDescriptor, class F, class Scoring>
TemplatedVocabulary<TDescriptor, F, Scoring>::TemplatedVocabulary(const std::string& filename) { loadRaw(filename); }
// --------------------------------------------------------------------------
/// Replaces the weighting method used by subsequent transforms.
template <class TDescriptor, class F, class Scoring>
void TemplatedVocabulary<TDescriptor, F, Scoring>::setWeightingType(WeightingType type)
{
    m_weighting = type;
}
// --------------------------------------------------------------------------
/// Destructor; all members clean up via their own destructors.
template <class TDescriptor, class F, class Scoring>
TemplatedVocabulary<TDescriptor, F, Scoring>::~TemplatedVocabulary() = default;
// --------------------------------------------------------------------------
/**
 * Builds the vocabulary tree from the training features using the already
 * configured branching factor, depth and weighting: collects all descriptor
 * pointers, runs hierarchical k-means, enumerates the words, and assigns
 * node weights.
 * @param training_features one vector of descriptors per training image
 */
template <class TDescriptor, class F, class Scoring>
void TemplatedVocabulary<TDescriptor, F, Scoring>::create(
    const std::vector<std::vector<TDescriptor>>& training_features)
{
    m_nodes.clear();
    m_words.clear();
    // expected_nodes = Sum_{i=0..L} ( k^i ). Guard m_k == 1, where the
    // closed form below would divide by zero (the tree degenerates to a chain).
    const int expected_nodes =
        (m_k > 1) ? (int)((std::pow((double)m_k, (double)m_L + 1) - 1) / (m_k - 1)) : m_L + 1;
    m_nodes.reserve(expected_nodes);  // avoid allocations when creating the tree
    std::vector<pDescriptor> features;
    getFeatures(training_features, features);
    // create root
    m_nodes.push_back(Node(0));  // root
    // create the tree
    HKmeansStep(0, features, 1);
    // create the words
    createWords();
    // and set the weight of each node of the tree
    setNodeWeights(training_features);
}
// --------------------------------------------------------------------------
/// Rebuilds the vocabulary after overriding the branching factor and depth.
template <class TDescriptor, class F, class Scoring>
void TemplatedVocabulary<TDescriptor, F, Scoring>::create(
    const std::vector<std::vector<TDescriptor>>& training_features, int k, int L)
{
    m_L = L;
    m_k = k;
    create(training_features);
}
// --------------------------------------------------------------------------
/// Rebuilds the vocabulary after overriding tree shape and weighting scheme.
template <class TDescriptor, class F, class Scoring>
void TemplatedVocabulary<TDescriptor, F, Scoring>::create(
    const std::vector<std::vector<TDescriptor>>& training_features, int k, int L, WeightingType weighting)
{
    m_weighting = weighting;
    m_L = L;
    m_k = k;
    create(training_features);
}
// --------------------------------------------------------------------------
/// Flattens the per-image training descriptors into one vector of pointers.
template <class TDescriptor, class F, class Scoring>
void TemplatedVocabulary<TDescriptor, F, Scoring>::getFeatures(
    const std::vector<std::vector<TDescriptor>>& training_features, std::vector<pDescriptor>& features) const
{
    features.resize(0);
    for (const auto& image_features : training_features)
    {
        features.reserve(features.size() + image_features.size());
        for (const auto& descriptor : image_features)
        {
            features.push_back(&descriptor);
        }
    }
}
// --------------------------------------------------------------------------
/**
 * Runs one step of hierarchical k-means: partitions `descriptors` into at
 * most m_k clusters, appends one child node per cluster under `parent_id`,
 * and recurses into each cluster until m_L levels have been created.
 * @param parent_id id of parent node
 * @param descriptors descriptors to run the kmeans on
 * @param current_level current level in the tree (children of the root are level 1)
 */
template <class TDescriptor, class F, class Scoring>
void TemplatedVocabulary<TDescriptor, F, Scoring>::HKmeansStep(NodeId parent_id,
                                                               const std::vector<pDescriptor>& descriptors,
                                                               int current_level)
{
    if (descriptors.empty()) return;
    // features associated to each cluster
    std::vector<TDescriptor> clusters;
    std::vector<std::vector<unsigned int>> groups;  // groups[i] = [j1, j2, ...]
    // j1, j2, ... indices of descriptors associated to cluster i
    clusters.reserve(m_k);
    groups.reserve(m_k);
    // const int msizes[] = { m_k, descriptors.size() };
    // cv::SparseMat assoc(2, msizes, CV_8U);
    // cv::SparseMat last_assoc(2, msizes, CV_8U);
    //// assoc.row(cluster_idx).col(descriptor_idx) = 1 iif associated
    if ((int)descriptors.size() <= m_k)
    {
        // trivial case: one cluster per feature
        groups.resize(descriptors.size());
        for (unsigned int i = 0; i < descriptors.size(); i++)
        {
            groups[i].push_back(i);
#ifdef USE_CV_FORB
            clusters.push_back(descriptors[i]->clone());
#else
            clusters.push_back(*descriptors[i]);
#endif
        }
    }
    else
    {
        // select clusters and groups with kmeans
        bool first_time = true;
        bool goon = true;
        // to check if clusters move after iterations
        std::vector<int> last_association, current_association;
        while (goon)
        {
            // 1. Calculate clusters
            if (first_time)
            {
                // random sample
                initiateClusters(descriptors, clusters);
            }
            else
            {
                // calculate cluster centres
                for (unsigned int c = 0; c < clusters.size(); ++c)
                {
                    std::vector<pDescriptor> cluster_descriptors;
                    cluster_descriptors.reserve(groups[c].size());
                    /*
                    for(unsigned int d = 0; d < descriptors.size(); ++d)
                    {
                        if( assoc.find<unsigned char>(c, d) )
                        {
                            cluster_descriptors.push_back(descriptors[d]);
                        }
                    }
                    */
                    std::vector<unsigned int>::const_iterator vit;
                    for (vit = groups[c].begin(); vit != groups[c].end(); ++vit)
                    {
                        cluster_descriptors.push_back(descriptors[*vit]);
                    }
                    F::meanValue(cluster_descriptors, clusters[c]);
                }
            }  // if(!first_time)
            // 2. Associate features with clusters
            // calculate distances to cluster centers
            groups.clear();
            groups.resize(clusters.size(), std::vector<unsigned int>());
            current_association.resize(descriptors.size());
            // assoc.clear();
            typename std::vector<pDescriptor>::const_iterator fit;
            // unsigned int d = 0;
            for (fit = descriptors.begin(); fit != descriptors.end(); ++fit)  //, ++d)
            {
                // assign each descriptor to its nearest cluster centre
                double best_dist = F::distance(*(*fit), clusters[0]);
                unsigned int icluster = 0;
                for (unsigned int c = 1; c < clusters.size(); ++c)
                {
                    double dist = F::distance(*(*fit), clusters[c]);
                    if (dist < best_dist)
                    {
                        best_dist = dist;
                        icluster = c;
                    }
                }
                // assoc.ref<unsigned char>(icluster, d) = 1;
                groups[icluster].push_back(fit - descriptors.begin());
                current_association[fit - descriptors.begin()] = icluster;
            }
            // kmeans++ ensures all the clusters has any feature associated with them
            // 3. check convergence: stop when no descriptor changed cluster
            if (first_time)
            {
                first_time = false;
            }
            else
            {
                // goon = !eqUChar(last_assoc, assoc);
                goon = false;
                for (unsigned int i = 0; i < current_association.size(); i++)
                {
                    if (current_association[i] != last_association[i])
                    {
                        goon = true;
                        break;
                    }
                }
            }
            if (goon)
            {
                // copy last feature-cluster association
                last_association = current_association;
                // last_assoc = assoc.clone();
            }
        }  // while(goon)
    }      // if must run kmeans
    // create one child node of parent_id per resulting cluster
    for (unsigned int i = 0; i < clusters.size(); ++i)
    {
        NodeId id = m_nodes.size();
        m_nodes.push_back(Node(id));
        m_nodes.back().descriptor = clusters[i];
        m_nodes.back().parent = parent_id;
        m_nodes[parent_id].children.push_back(id);
    }
    // go on with the next level
    if (current_level < m_L)
    {
        // iterate again with the resulting clusters
        const std::vector<NodeId>& children_ids = m_nodes[parent_id].children;
        for (unsigned int i = 0; i < clusters.size(); ++i)
        {
            NodeId id = children_ids[i];
            std::vector<pDescriptor> child_features;
            child_features.reserve(groups[i].size());
            std::vector<unsigned int>::const_iterator vit;
            for (vit = groups[i].begin(); vit != groups[i].end(); ++vit)
            {
                child_features.push_back(descriptors[*vit]);
            }
            if (child_features.size() > 1)
            {
                HKmeansStep(id, child_features, current_level + 1);
            }
        }
    }
}
// --------------------------------------------------------------------------
/// Seeds the initial clusters; this base implementation delegates to kmeans++.
template <class TDescriptor, class F, class Scoring>
void TemplatedVocabulary<TDescriptor, F, Scoring>::initiateClusters(const std::vector<pDescriptor>& descriptors,
                                                                    std::vector<TDescriptor>& clusters) const
{
    initiateClustersKMpp(descriptors, clusters);
}
// --------------------------------------------------------------------------
/**
 * Seeds up to m_k cluster centres from the feature set with kmeans++-style
 * sampling.
 * NOTE(review): the code samples proportionally to D(x), not D(x)^2 as the
 * algorithm description below states — confirm whether this is intentional.
 * @param pfeatures candidate features
 * @param clusters (out) chosen cluster centres
 */
template <class TDescriptor, class F, class Scoring>
void TemplatedVocabulary<TDescriptor, F, Scoring>::initiateClustersKMpp(const std::vector<pDescriptor>& pfeatures,
                                                                        std::vector<TDescriptor>& clusters) const
{
    // Implements kmeans++ seeding algorithm
    // Algorithm:
    // 1. Choose one center uniformly at random from among the data points.
    // 2. For each data point x, compute D(x), the distance between x and the nearest
    //    center that has already been chosen.
    // 3. Add one new data point as a center. Each point x is chosen with probability
    //    proportional to D(x)^2.
    // 4. Repeat Steps 2 and 3 until k centers have been chosen.
    // 5. Now that the initial centers have been chosen, proceed using standard k-means
    //    clustering.
    clusters.resize(0);
    clusters.reserve(m_k);
    // min_dists[i] = distance from feature i to its closest chosen centre
    std::vector<double> min_dists(pfeatures.size(), std::numeric_limits<double>::max());
    // 1.
    int ifeature = RandomInt(0, pfeatures.size() - 1);
    // create first cluster
#ifdef USE_CV_FORB
    clusters.push_back(pfeatures[ifeature]->clone());
#else
    clusters.push_back(*pfeatures[ifeature]);
#endif
    // compute the initial distances
    typename std::vector<pDescriptor>::const_iterator fit;
    std::vector<double>::iterator dit;
    dit = min_dists.begin();
    for (fit = pfeatures.begin(); fit != pfeatures.end(); ++fit, ++dit)
    {
        *dit = F::distance(*(*fit), clusters.back());
    }
    while ((int)clusters.size() < m_k)
    {
        // 2. only the most recent centre can lower the min distances
        dit = min_dists.begin();
        for (fit = pfeatures.begin(); fit != pfeatures.end(); ++fit, ++dit)
        {
            if (*dit > 0)
            {
                double dist = F::distance(*(*fit), clusters.back());
                if (dist < *dit) *dit = dist;
            }
        }
        // 3. sample the next centre along the cumulative distance
        double dist_sum = std::accumulate(min_dists.begin(), min_dists.end(), 0.0);
        if (dist_sum > 0)
        {
            double cut_d;
            do
            {
                cut_d = RandomValue<double>(0, dist_sum);
            } while (cut_d == 0.0);
            // walk the cumulative distances until cut_d is reached
            double d_up_now = 0;
            for (dit = min_dists.begin(); dit != min_dists.end(); ++dit)
            {
                d_up_now += *dit;
                if (d_up_now >= cut_d) break;
            }
            if (dit == min_dists.end())
                ifeature = pfeatures.size() - 1;
            else
                ifeature = dit - min_dists.begin();
#ifdef USE_CV_FORB
            clusters.push_back(pfeatures[ifeature]->clone());
#else
            clusters.push_back(*pfeatures[ifeature]);
#endif
        }  // if dist_sum > 0
        else
            break;
    }  // while(used_clusters < m_k)
}
// --------------------------------------------------------------------------
/// Collects the tree leaves into m_words and assigns sequential word ids.
template <class TDescriptor, class F, class Scoring>
void TemplatedVocabulary<TDescriptor, F, Scoring>::createWords()
{
    m_words.resize(0);
    if (m_nodes.empty()) return;
    m_words.reserve((int)pow((double)m_k, (double)m_L));
    // node 0 is the root and can never be a word, so start at index 1
    for (size_t i = 1; i < m_nodes.size(); ++i)
    {
        Node& node = m_nodes[i];
        if (node.isLeaf())
        {
            node.word_id = m_words.size();
            m_words.push_back(&node);
        }
    }
}
// --------------------------------------------------------------------------
/**
 * Assigns the (idf part of the) weight of every word according to the
 * training images. TF/BINARY weights are constant 1; IDF/TF_IDF weights are
 * ln(N/Ni), where Ni is the number of images containing word i.
 * @param training_features one vector of descriptors per training image
 */
template <class TDescriptor, class F, class Scoring>
void TemplatedVocabulary<TDescriptor, F, Scoring>::setNodeWeights(
    const std::vector<std::vector<TDescriptor>>& training_features)
{
    const unsigned int NWords = m_words.size();
    const unsigned int NDocs = training_features.size();
    if (m_weighting == TF || m_weighting == BINARY)
    {
        // the idf part must always be 1 for these schemes
        for (unsigned int i = 0; i < NWords; i++) m_words[i]->weight = 1;
        return;
    }
    if (m_weighting == IDF || m_weighting == TF_IDF)
    {
        // Ni[w] = number of training images where word w appears at least
        // once; the tf part is applied later, in ::transform.
        std::vector<unsigned int> Ni(NWords, 0);
        std::vector<bool> counted(NWords, false);
        for (const auto& image : training_features)
        {
            std::fill(counted.begin(), counted.end(), false);
            for (const auto& feature : image)
            {
                WordId word_id;
                transform(feature, word_id);
                if (!counted[word_id])
                {
                    Ni[word_id]++;
                    counted[word_id] = true;
                }
            }
        }
        // weight = ln(N/Ni); Ni == 0 cannot occur when seeding with kmeans++
        for (unsigned int i = 0; i < NWords; i++)
        {
            if (Ni[i] > 0) m_words[i]->weight = log((double)NDocs / (double)Ni[i]);
        }
    }
}
// --------------------------------------------------------------------------
/**
 * Returns the number of words (leaves) in the vocabulary.
 */
template <class TDescriptor, class F, class Scoring>
inline unsigned int TemplatedVocabulary<TDescriptor, F, Scoring>::size() const
{
    return m_words.size();
}
// --------------------------------------------------------------------------
/**
 * Returns whether the vocabulary has no words, i.e. it has not been trained.
 */
template <class TDescriptor, class F, class Scoring>
inline bool TemplatedVocabulary<TDescriptor, F, Scoring>::empty() const
{
    return m_words.empty();
}
// --------------------------------------------------------------------------
/// Returns the average depth of the leaves, walking each word up to the root.
template <class TDescriptor, class F, class Scoring>
float TemplatedVocabulary<TDescriptor, F, Scoring>::getEffectiveLevels() const
{
    long sum = 0;
    for (const Node* word : m_words)
    {
        // count the hops from this leaf up to the root (id 0)
        const Node* p = word;
        while (p->id != 0)
        {
            p = &m_nodes[p->parent];
            ++sum;
        }
    }
    return (float)((double)sum / (double)m_words.size());
}
// --------------------------------------------------------------------------
/**
 * Returns the descriptor of word wid. No bounds checking is performed.
 */
template <class TDescriptor, class F, class Scoring>
TDescriptor TemplatedVocabulary<TDescriptor, F, Scoring>::getWord(WordId wid) const
{
    return m_words[wid]->descriptor;
}
// --------------------------------------------------------------------------
/**
 * Returns the weight of word wid (0 if the word is stopped).
 * No bounds checking is performed.
 */
template <class TDescriptor, class F, class Scoring>
WordValue TemplatedVocabulary<TDescriptor, F, Scoring>::getWordWeight(WordId wid) const
{
    return m_words[wid]->weight;
}
// --------------------------------------------------------------------------
/// Maps a single descriptor to its word id; an untrained vocabulary yields 0.
template <class TDescriptor, class F, class Scoring>
WordId TemplatedVocabulary<TDescriptor, F, Scoring>::transform(const TDescriptor& feature) const
{
    if (empty()) return 0;
    WordId wid;
    transform(feature, wid);
    return wid;
}
// --------------------------------------------------------------------------
/**
 * Turns a set of descriptors into a bag-of-words vector, applying the
 * configured weighting scheme; normalizes the result when the scoring
 * policy requires it.
 * @param features input descriptors
 * @param v (out) bag-of-words vector of weighted words
 */
template <class TDescriptor, class F, class Scoring>
void TemplatedVocabulary<TDescriptor, F, Scoring>::transform(const std::vector<TDescriptor>& features,
                                                             BowVector& v) const
{
    v.clear();
    if (empty()) return;
    // TF/TF_IDF accumulate repeated words; IDF/BINARY record each word once
    const bool accumulate = (m_weighting == TF || m_weighting == TF_IDF);
    for (const TDescriptor& feature : features)
    {
        WordId id;
        WordValue w;
        // w is the idf value for TF_IDF/IDF, 1 for TF/BINARY
        transform(feature, id, w);
        if (w > 0)  // stopped words carry weight 0
        {
            if (accumulate)
                v.addWeight(id, w);
            else
                v.addIfNotExist(id, w);
        }
    }
    if (accumulate && !v.empty() && !Scoring::mustNormalize)
    {
        // apply the tf part (1/N) only when no normalization follows
        const double nd = v.size();
        for (auto& entry : v) entry.second /= nd;
    }
    if (Scoring::mustNormalize) v.normalize();
}
// --------------------------------------------------------------------------
/**
 * Turns a set of descriptors into a bag-of-words vector plus a feature
 * vector mapping tree nodes (levelsup levels above the leaves) to feature
 * indices. Only TF and TF_IDF weighting are supported here; IDF/BINARY
 * throw std::runtime_error.
 * @param features input descriptors
 * @param v (out) bag-of-words vector
 * @param fv (out) feature vector of nodes and feature indexes
 * @param levelsup levels to go up the vocabulary tree to get the node index
 * @throws std::runtime_error for IDF or BINARY weighting
 */
template <class TDescriptor, class F, class Scoring>
void TemplatedVocabulary<TDescriptor, F, Scoring>::transform(const std::vector<TDescriptor>& features, BowVector& v,
                                                             FeatureVector& fv, int levelsup) const
{
    v.clear();
    fv.clear();
    if (empty())  // safe for subclasses
    {
        return;
    }
    if (m_weighting != TF && m_weighting != TF_IDF)
    {
        // The IDF/BINARY code path was never implemented in this port; the
        // previous version threw here as well, followed by unreachable code.
        throw std::runtime_error("not supported");
    }
    const int N = (int)features.size();
    using TransformResult = std::tuple<WordId, NodeId, WordValue>;
    std::vector<TransformResult> transformedFeatures(N);
    // 1. descend every feature through the tree
    for (int i = 0; i < N; ++i)
    {
        WordId& id = std::get<0>(transformedFeatures[i]);
        NodeId& nid = std::get<1>(transformedFeatures[i]);
        WordValue& w = std::get<2>(transformedFeatures[i]);
        // w is the idf value if TF_IDF, 1 if TF
        transform(features[i], id, w, &nid, levelsup);
    }
    // 2. accumulate the word weights and the node -> feature-index map
    for (int i = 0; i < N; ++i)
    {
        WordId& id = std::get<0>(transformedFeatures[i]);
        NodeId& nid = std::get<1>(transformedFeatures[i]);
        WordValue& w = std::get<2>(transformedFeatures[i]);
        if (w > 0)  // not stopped
        {
            v.addWeight(id, w);
            fv.addFeature(nid, i);
        }
    }
    if (!v.empty() && !Scoring::mustNormalize)
    {
        // apply the tf part (1/N); unnecessary when normalizing
        const double nd = v.size();
        for (BowVector::iterator vit = v.begin(); vit != v.end(); vit++) vit->second /= nd;
    }
    if (Scoring::mustNormalize) v.normalize();
}
/**
 * OpenMP variant of transform(features, v, fv, levelsup). The worksharing
 * directives below are orphaned (#pragma omp single / for without a
 * parallel region), so this is presumably meant to be called by all threads
 * of an enclosing parallel region — verify at the call site. Uses the
 * members N / transformedFeatures as shared scratch state, so the same
 * vocabulary must not run two of these calls concurrently.
 */
template <class TDescriptor, class F, class Scoring>
void TemplatedVocabulary<TDescriptor, F, Scoring>::transformOMP(const std::vector<TDescriptor>& features, BowVector& v,
                                                                FeatureVector& fv, int levelsup)
{
#pragma omp single
    {
        N = features.size();
        transformedFeatures.resize(N);
        v.clear();
        fv.clear();
    }
    // every thread evaluates the same condition, so this early exit is collective
    if (empty())  // safe for subclasses
    {
        return;
    }
    // descend the features through the tree in parallel
#pragma omp for
    for (int i = 0; i < N; ++i)
    {
        WordId& id = std::get<0>(transformedFeatures[i]);
        NodeId& nid = std::get<1>(transformedFeatures[i]);
        WordValue& w = std::get<2>(transformedFeatures[i]);
        // w is the idf value if TF_IDF, 1 if TF
        transform(features[i], id, w, &nid, levelsup);
    }
    // sequential reduction of the per-feature results into v and fv
#pragma omp single
    {
        for (int i = 0; i < N; ++i)
        {
            WordId& id = std::get<0>(transformedFeatures[i]);
            NodeId& nid = std::get<1>(transformedFeatures[i]);
            WordValue& w = std::get<2>(transformedFeatures[i]);
            if (w > 0)  // not stopped
            {
                v.addWeight(id, w);
                fv.addFeature(nid, i);
            }
        }
        if (!v.empty() && !Scoring::mustNormalize)
        {
            // unnecessary when normalizing
            const double nd = v.size();
            for (BowVector::iterator vit = v.begin(); vit != v.end(); vit++) vit->second /= nd;
        }
        if (Scoring::mustNormalize) v.normalize();
    }
}
// --------------------------------------------------------------------------
/// Computes the similarity score between two bag-of-words vectors using the
/// Scoring policy of this vocabulary instantiation.
template <class TDescriptor, class F, class Scoring>
inline double TemplatedVocabulary<TDescriptor, F, Scoring>::score(const BowVector& v1, const BowVector& v2) const
{
    const double similarity = Scoring::score(v1, v2);
    return similarity;
}
// --------------------------------------------------------------------------
/// Convenience overload: maps a descriptor to its word id, discarding the
/// word weight computed by the full transform().
template <class TDescriptor, class F, class Scoring>
void TemplatedVocabulary<TDescriptor, F, Scoring>::transform(const TDescriptor& feature, WordId& id) const
{
    WordValue discarded_weight;
    transform(feature, id, discarded_weight);
}
// --------------------------------------------------------------------------
/**
 * Descends the vocabulary tree from the root, at each level following the
 * child whose descriptor is closest (per F::distance) to the feature, until
 * a leaf is reached.
 * @param feature descriptor to look up
 * @param word_id (out) id of the word (leaf) the feature lands on
 * @param weight (out) weight of that word (0 means the word was stopped)
 * @param nid (out, optional) id of the ancestor node at level m_L - levelsup
 * @param levelsup levels up from the leaves for the nid node
 */
template <class TDescriptor, class F, class Scoring>
void TemplatedVocabulary<TDescriptor, F, Scoring>::transform(const TDescriptor& feature, WordId& word_id,
                                                             WordValue& weight, NodeId* nid, int levelsup) const
{
    // propagate the feature down the tree
    // std::vector<NodeId> nodes;
    // typename std::vector<NodeId>::const_iterator nit;
    // level at which the node must be stored in nid, if given
    const int nid_level = m_L - levelsup;
    if (nid_level <= 0 && nid != NULL) *nid = 0; // root
    NodeId final_id = 0; // root
    int current_level = 0;
    do
    {
        ++current_level;
        auto& nodes = m_nodes[final_id].children;
        // take the first child as the provisional best match, then scan the rest
        final_id = nodes[0];
        double best_d = F::distance(feature, m_nodes[final_id].descriptor);
        for (auto nit = nodes.begin() + 1; nit != nodes.end(); ++nit)
        {
            NodeId id = *nit;
            double d = F::distance(feature, m_nodes[id].descriptor);
            if (d < best_d)
            {
                best_d = d;
                final_id = id;
            }
        }
        // remember the ancestor at the requested level, if the caller asked for it
        if (nid != NULL && current_level == nid_level) *nid = final_id;
    } while (!m_nodes[final_id].isLeaf());
    // turn node id into word id
    word_id = m_nodes[final_id].word_id;
    weight = m_nodes[final_id].weight;
}
// --------------------------------------------------------------------------
/// Returns the id of the ancestor of word wid that lies levelsup levels
/// above its leaf node; stops early at the root (node id 0).
template <class TDescriptor, class F, class Scoring>
NodeId TemplatedVocabulary<TDescriptor, F, Scoring>::getParentNode(WordId wid, int levelsup) const
{
    // Start at the leaf node associated with the word and climb the tree.
    NodeId current = m_words[wid]->id;
    for (; levelsup > 0 && current != 0; --levelsup)
    {
        current = m_nodes[current].parent;
    }
    return current;
}
// --------------------------------------------------------------------------
/// Collects the word ids of all leaves in the subtree rooted at nid.
/// The output vector is cleared first; traversal order matches a
/// depth-first walk over the children vectors.
template <class TDescriptor, class F, class Scoring>
void TemplatedVocabulary<TDescriptor, F, Scoring>::getWordsFromNode(NodeId nid, std::vector<WordId>& words) const
{
    words.clear();
    if (m_nodes[nid].isLeaf())
    {
        words.push_back(m_nodes[nid].word_id);
        return;
    }
    words.reserve(m_k); // subtree holds at least k descendants (k^1, k^2, ...)
    std::vector<NodeId> pending;
    pending.push_back(nid);
    while (!pending.empty())
    {
        const NodeId current = pending.back();
        pending.pop_back();
        for (NodeId child_id : m_nodes[current].children)
        {
            const Node& child = m_nodes[child_id];
            if (child.isLeaf())
                words.push_back(child.word_id);
            else
                pending.push_back(child_id);
        }
    }
}
// --------------------------------------------------------------------------
/// Zeroes out ("stops") the weight of every word whose weight falls below
/// minWeight, so that transform() ignores it.
/// @return the number of words that were stopped by this call
template <class TDescriptor, class F, class Scoring>
int TemplatedVocabulary<TDescriptor, F, Scoring>::stopWords(double minWeight)
{
    int stopped = 0;
    for (Node* word : m_words)
    {
        if (word->weight < minWeight)
        {
            word->weight = 0;
            ++stopped;
        }
    }
    return stopped;
}
/// Minimal helper for raw binary (de)serialization of values and vectors
/// thereof. Values are written as their in-memory bytes, so files are only
/// portable between builds with the same endianness, type sizes and padding.
/// Fix: the constructor parameter was named `__mode`, a reserved identifier
/// (any name containing a double underscore belongs to the implementation).
struct BinaryFile
{
    /// Opens @a file in binary mode combined with @a mode
    /// (std::ios_base::in / std::ios_base::out).
    /// Callers should check strm.is_open() before use.
    BinaryFile(const std::string& file, std::ios_base::openmode mode = std::ios_base::in)
        : strm(file, std::ios::binary | mode)
    {
    }
    /// Writes the raw bytes of v (intended for trivially copyable T).
    template <typename T>
    void write(const T& v)
    {
        strm.write(reinterpret_cast<const char*>(&v), sizeof(T));
    }
    /// Writes the element count (as size_t) followed by each element.
    template <typename T>
    void write(const std::vector<T>& vec)
    {
        write((size_t)vec.size());
        for (auto& v : vec) write(v);
    }
    /// Reads a vector previously written by write(std::vector<T>).
    template <typename T>
    void read(std::vector<T>& vec)
    {
        size_t s;
        read(s);
        vec.resize(s);
        for (auto& v : vec) read(v);
    }
    /// Reads the raw bytes of v (intended for trivially copyable T).
    template <typename T>
    void read(T& v)
    {
        strm.read(reinterpret_cast<char*>(&v), sizeof(T));
    }
    /// Stream-style write; chains.
    template <typename T>
    BinaryFile& operator<<(const T& v)
    {
        write(v);
        return *this;
    }
    /// Stream-style read; chains.
    template <typename T>
    BinaryFile& operator>>(T& v)
    {
        read(v);
        return *this;
    }
    std::fstream strm; ///< underlying stream, public so callers can query its state
};
/**
 * Loads a vocabulary stored by saveRaw(). Only TF_IDF weighting and the
 * scoring type of this template instantiation are accepted.
 * Fix: the word-table loop used `auto i = 0` (deduced int), producing a
 * signed/unsigned comparison against m_words.size(); now uses size_t.
 * @param file path of the raw binary vocabulary
 * @throws std::runtime_error if the file cannot be opened or the header
 *         does not match this instantiation
 */
template <class TDescriptor, class F, class Scoring>
void TemplatedVocabulary<TDescriptor, F, Scoring>::loadRaw(const std::string& file)
{
    BinaryFile bf(file, std::ios_base::in);
    if (!bf.strm.is_open())
    {
        throw std::runtime_error("Could not load Voc file.");
    }
    int scoringid;
    bf >> m_k >> m_L >> scoringid >> m_weighting;
    if (m_weighting != TF_IDF)
    {
        throw std::runtime_error("Only TF_IDF supported.");
    }
    if (scoringid != Scoring::id)
    {
        throw std::runtime_error("Scoring id doesn't match template.");
    }
    // Nodes. Assumes a node's id equals its index in m_nodes and that a
    // parent is stored before its children (saveRaw() writes them in index
    // order) -- TODO confirm for externally produced files.
    size_t nodecount;
    bf >> nodecount;
    m_nodes.resize(nodecount);
    for (Node& n : m_nodes)
    {
        bf >> n.id >> n.parent >> n.weight >> n.word_id >> n.descriptor;
        if (n.id != 0) m_nodes[n.parent].children.push_back(n.id);
    }
    // Word table: pairs of (word index, node id); rebuild word -> node pointers.
    std::vector<std::pair<int, int>> words;
    bf >> words;
    m_words.resize(words.size());
    for (size_t i = 0; i < m_words.size(); ++i)
    {
        m_words[i] = &m_nodes[words[i].second];
    }
}
/// Serializes the vocabulary in the raw binary layout understood by
/// loadRaw(): header, nodes in index order, then the word table.
template <class TDescriptor, class F, class Scoring>
void TemplatedVocabulary<TDescriptor, F, Scoring>::saveRaw(const std::string& file) const
{
    BinaryFile bf(file, std::ios_base::out);
    // header: branching factor, depth, scoring id, weighting scheme
    bf << m_k << m_L << Scoring::id << m_weighting;
    // all tree nodes, in index order
    bf << (size_t)m_nodes.size();
    for (const Node& n : m_nodes)
    {
        bf << n.id << n.parent << n.weight << n.word_id << n.descriptor;
    }
    // word table: (word index, node id) pairs
    std::vector<std::pair<int, int>> words;
    words.reserve(m_words.size());
    int index = 0;
    for (const Node* word : m_words)
    {
        words.emplace_back(index++, word->id);
    }
    bf << words;
}
// --------------------------------------------------------------------------
/**
 * Writes printable information of the vocabulary.
 * @param os stream to write to
 * @param voc vocabulary to describe
 * @return os, for chaining
 */
template <class TDescriptor, class F, class Scoring>
std::ostream& operator<<(std::ostream& os, const TemplatedVocabulary<TDescriptor, F, Scoring>& voc)
{
    os << "Vocabulary: k = " << voc.getBranchingFactor() << ", L = " << voc.getDepthLevels() << ", Weighting = ";
    const auto weighting = voc.getWeightingType();
    if (weighting == TF_IDF)
        os << "tf-idf";
    else if (weighting == TF)
        os << "tf";
    else if (weighting == IDF)
        os << "idf";
    else if (weighting == BINARY)
        os << "binary";
    os << ", Scoring = ";
    if (Scoring::id == 0) os << "L1-norm";
    os << ", Number of words = " << voc.size();
    return os;
}
} // namespace MiniBow
|
core_zgeadd.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> c d s
*
**/
#include "core_blas.h"
#include "plasma_internal.h"
#include "plasma_types.h"
#include "core_lapack.h"
/****************************************************************************//**
*
* @ingroup core_geadd
*
* Performs an addition of two general matrices similarly to the
* 'pzgeadd()' function from the PBLAS library:
*
* \f[ B = \alpha * op( A ) + \beta * B, \f]
*
* where op( X ) is one of:
* \f[ op( X ) = X, \f]
* \f[ op( X ) = X^T, \f]
* \f[ op( X ) = X^H, \f]
*
* alpha and beta are scalars and A, B are matrices with op( A ) an m-by-n or
* n-by-m matrix depending on the value of transa and B an m-by-n matrix.
*
*******************************************************************************
*
* @param[in] transa
* Specifies whether the matrix A is non-transposed, transposed, or
* conjugate transposed
* - PlasmaNoTrans: op( A ) = A
* - PlasmaTrans: op( A ) = A^T
* - PlasmaConjTrans: op( A ) = A^H
*
* @param[in] m
* Number of rows of the matrices op( A ) and B.
* m >= 0.
*
 * @param[in] n
 *          Number of columns of the matrices op( A ) and B.
 *          n >= 0.
*
* @param[in] alpha
* Scalar factor of A.
*
* @param[in] A
* Matrix of size lda-by-k, where k is n when transa == PlasmaNoTrans
* and m otherwise.
*
* @param[in] lda
* Leading dimension of the array A. lda >= max(1,l), where l is m
* when transa == PlasmaNoTrans and n otherwise.
*
* @param[in] beta
* Scalar factor of B.
*
* @param[in,out] B
* Matrix of size ldb-by-n.
* On exit, B = alpha * op( A ) + beta * B
*
* @param[in] ldb
* Leading dimension of the array B.
* ldb >= max(1,m)
*
******************************************************************************/
/// Computes B = alpha*op(A) + beta*B with op in {identity, transpose,
/// conjugate transpose}. Arguments are validated first; error codes mirror
/// the (negated) argument positions. Returns PlasmaSuccess on success.
int core_zgeadd(plasma_enum_t transa,
                int m, int n,
                plasma_complex64_t alpha, const plasma_complex64_t *A, int lda,
                plasma_complex64_t beta,  plasma_complex64_t *B, int ldb)
{
    // ---- argument validation ------------------------------------------
    if (transa != PlasmaNoTrans &&
        transa != PlasmaTrans &&
        transa != PlasmaConjTrans) {
        coreblas_error("illegal value of transa");
        return -1;
    }
    if (m < 0) {
        coreblas_error("illegal value of m");
        return -2;
    }
    if (n < 0) {
        coreblas_error("illegal value of n");
        return -3;
    }
    if (A == NULL) {
        coreblas_error("NULL A");
        return -5;
    }
    // A physically has m rows when not transposed, n rows otherwise.
    {
        int a_rows = (transa == PlasmaNoTrans) ? m : n;
        if (a_rows > 0 && lda < imax(1, a_rows)) {
            coreblas_error("illegal value of lda");
            return -6;
        }
    }
    if (B == NULL) {
        coreblas_error("NULL B");
        return -8;
    }
    if (m > 0 && ldb < imax(1, m)) {
        coreblas_error("illegal value of ldb");
        return -9;
    }
    // quick return: nothing to do for empty matrices or the identity update
    if (m == 0 || n == 0 || (alpha == 0.0 && beta == 1.0))
        return PlasmaSuccess;
    // ---- the update, column-major with unit-stride inner loop ---------
    if (transa == PlasmaConjTrans) {
        for (int col = 0; col < n; col++)
            for (int row = 0; row < m; row++)
                B[ldb*col+row] = beta * B[ldb*col+row] + alpha * conj(A[lda*row+col]);
    }
    else if (transa == PlasmaTrans) {
        for (int col = 0; col < n; col++)
            for (int row = 0; row < m; row++)
                B[ldb*col+row] = beta * B[ldb*col+row] + alpha * A[lda*row+col];
    }
    else { // PlasmaNoTrans
        for (int col = 0; col < n; col++)
            for (int row = 0; row < m; row++)
                B[ldb*col+row] = beta * B[ldb*col+row] + alpha * A[lda*col+row];
    }
    return PlasmaSuccess;
}
/******************************************************************************/
/// OpenMP-task wrapper around core_zgeadd(): schedules the addition as a
/// task that reads tile A and updates tile B in place. On failure the
/// sequence is marked failed so dependent tasks can bail out.
void core_omp_zgeadd(
    plasma_enum_t transa,
    int m, int n,
    plasma_complex64_t alpha, const plasma_complex64_t *A, int lda,
    plasma_complex64_t beta,  plasma_complex64_t *B, int ldb,
    plasma_sequence_t *sequence, plasma_request_t *request)
{
    // op(A) is m-by-n, so the A tile itself spans k columns.
    int k = (transa == PlasmaNoTrans) ? n : m;
    #pragma omp task depend(in:A[0:lda*k]) \
                     depend(inout:B[0:ldb*n])
    {
        // Skip the work if an earlier task in this sequence already failed.
        if (sequence->status == PlasmaSuccess) {
            int retval = core_zgeadd(transa,
                                     m, n,
                                     alpha, A, lda,
                                     beta, B, ldb);
            if (retval != PlasmaSuccess) {
                plasma_error("core_zgeadd() failed");
                plasma_request_fail(sequence, request, PlasmaErrorInternal);
            }
        }
    }
}
|
convolution_pack4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Packed-4 (SSE) direct convolution: input, weights and output store 4
// channels interleaved per element, so each output pixel is accumulated in
// one __m128 register.
// NOTE(review): assumes top_blob is already sized to the convolution output
// and that weight_data_packed holds 16 floats (4-in x 4-out) per
// (input-channel, kernel-tap) pair -- confirm against the caller.
static void convolution_pack4_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_packed, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;
    const int maxk = kernel_w * kernel_h;
    // kernel offsets
    // Precompute, for each kernel tap, its (dilation-aware) element offset
    // inside the input window, so the inner loop is a flat indexed gather.
    std::vector<int> _space_ofs(maxk);
    int* space_ofs = &_space_ofs[0];
    {
        int p1 = 0;
        int p2 = 0;
        int gap = w * dilation_h - kernel_w * dilation_w;
        for (int i = 0; i < kernel_h; i++)
        {
            for (int j = 0; j < kernel_w; j++)
            {
                space_ofs[p1] = p2;
                p1++;
                p2 += dilation_w;
            }
            p2 += gap;
        }
    }
    const float* bias_data_ptr = bias_data;
    // Output channels are independent -> one channel per OpenMP iteration.
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        float* outptr = top_blob.channel(p);
        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                __m128 _sum = _mm_setzero_ps();
                if (bias_data_ptr)
                {
                    _sum = _mm_loadu_ps(bias_data_ptr + p * 4);
                }
                const float* kptr = weight_data_packed.channel(p);
                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);
                    const float* sptr = m.row(i * stride_h) + j * stride_w * 4;
                    for (int k = 0; k < maxk; k++)
                    {
                        // Broadcast each of the 4 packed input lanes and
                        // fused-multiply-add with its 4-wide weight row.
                        const float* slptr = sptr + space_ofs[k] * 4;
                        __m128 _val0 = _mm_load1_ps(slptr);
                        __m128 _val1 = _mm_load1_ps(slptr + 1);
                        __m128 _val2 = _mm_load1_ps(slptr + 2);
                        __m128 _val3 = _mm_load1_ps(slptr + 3);
                        __m128 _w0 = _mm_load_ps(kptr);
                        __m128 _w1 = _mm_load_ps(kptr + 4);
                        __m128 _w2 = _mm_load_ps(kptr + 8);
                        __m128 _w3 = _mm_load_ps(kptr + 12);
                        _sum = _mm_comp_fmadd_ps(_val0, _w0, _sum);
                        _sum = _mm_comp_fmadd_ps(_val1, _w1, _sum);
                        _sum = _mm_comp_fmadd_ps(_val2, _w2, _sum);
                        _sum = _mm_comp_fmadd_ps(_val3, _w3, _sum);
                        kptr += 16;
                    }
                }
                _sum = activation_sse(_sum, activation_type, activation_params);
                _mm_storeu_ps(outptr + j * 4, _sum);
            }
            outptr += outw * 4;
        }
    }
}
|
ten_tusscher_2004_epi_S2_18.c | //Original Ten Tusscher
#include <assert.h>
#include <stdlib.h>
#include "ten_tusscher_2004_epi_S2_18.h"
// Fills the cell-model descriptor with this model's resting potential and
// number of ODE equations (signature comes from the GET_CELL_MODEL_DATA
// interface macro; get_initial_v / get_neq select which fields to set).
GET_CELL_MODEL_DATA(init_cell_model_data) {
    assert(cell_model);
    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;
    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;
}
//TODO: this should be called only once for the whole mesh, like in the GPU code
//TODO: this should be called only once for the whole mesh, like in the GPU code
// Writes the model's initial state into sv (signature comes from the
// SET_ODE_INITIAL_CONDITIONS_CPU interface macro). Uses Elnaz's precomputed
// steady-state values instead of the textbook defaults kept below for reference.
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {
    // Default initial conditions
    /*
    sv[0] = INITIAL_V;     // V;       millivolt
    sv[1] = 0.f;           //M
    sv[2] = 0.75;          //H
    sv[3] = 0.75f;         //J
    sv[4] = 0.f;           //Xr1
    sv[5] = 1.f;           //Xr2
    sv[6] = 0.f;           //Xs
    sv[7] = 1.f;           //S
    sv[8] = 0.f;           //R
    sv[9] = 0.f;           //D
    sv[10] = 1.f;          //F
    sv[11] = 1.f;          //FCa
    sv[12] = 1.f;          //G
    sv[13] = 0.0002;       //Cai
    sv[14] = 0.2f;         //CaSR
    sv[15] = 11.6f;        //Nai
    sv[16] = 138.3f;       //Ki
    */
    // Elnaz's steady-state initial conditions (same variable order as above)
    real sv_sst[]={-86.5952182591768,0.00128266400523176,0.780370393090429,0.780208222766858,0.000174041905078485,0.485370727173588,0.00293466121399432,0.999998357055344,1.92482840573537e-08,1.88428105751378e-05,0.999770837182767,1.00699532179645,0.999993733315635,4.75139548173797e-05,0.266377866651071,10.2975786179389,139.536672800382};
    for (uint32_t i = 0; i < NEQ; i++)
        sv[i] = sv_sst[i];
}
// Advances every requested cell by num_steps explicit steps of size dt
// (signature comes from the SOLVE_MODEL_ODES_CPU interface macro).
// cells_to_solve, when non-NULL, maps the i-th work item to its state-vector
// id; otherwise cells are assumed to be laid out contiguously.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {
    uint32_t sv_id;
    int i;
    // Cells are independent; sv_id is per-thread via private().
    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++) {
        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = i;
        for (int j = 0; j < num_steps; ++j) {
            solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}
// One explicit integration step for a single cell: snapshot the current
// state, evaluate the model update, then write the new state back into sv.
void solve_model_ode_cpu(real dt, real *sv, real stim_current) {
    assert(sv);
    real state[NEQ];
    real next[NEQ];
    for(int eq = 0; eq < NEQ; eq++) {
        state[eq] = sv[eq];
    }
    RHS_cpu(state, next, stim_current, dt);
    for(int eq = 0; eq < NEQ; eq++) {
        sv[eq] = next[eq];
    }
}
// Evaluates one dt-step of the ten Tusscher 2004 epicardial ventricular
// cell model: reads the 17 state variables from sv, advances them under
// stim_current, and writes the updated values into rDY_.
// NOTE(review): despite the name, rDY_ receives updated STATE VALUES, not
// time derivatives -- the caller assigns them to sv directly
// (see solve_model_ode_cpu). Gates use exponential (Rush-Larsen-style)
// updates; concentrations use explicit Euler with buffering solved
// analytically via a quadratic.
void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) {
    // State variables
    real svolt = sv[0];   // membrane potential (mV)
    real sm   = sv[1];
    real sh   = sv[2];
    real sj   = sv[3];
    real sxr1 = sv[4];
    real sxr2 = sv[5];
    real sxs  = sv[6];
    real ss   = sv[7];
    real sr   = sv[8];
    real sd   = sv[9];
    real sf   = sv[10];
    real sfca = sv[11];
    real sg   = sv[12];
    real Cai  = sv[13];
    real CaSR = sv[14];
    real Nai  = sv[15];
    real Ki   = sv[16];
    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;
    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;
    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;
    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF   =(R*T)/F;
    //Cellular capacitance
    real CAPACITANCE=0.185;
    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    ///#ifdef EPI
    real Gks=0.245;
    ///#endif
    ///#ifdef ENDO
    ///    real Gks=0.245;
    ///#endif
    ///#ifdef MCELL
    ///    real Gks=0.062;
    ///#endif
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    //#ifdef EPI
    real Gto=0.294;
    //#endif
    // #ifdef ENDO
    //    real Gto=0.073;
    //#endif
    //#ifdef MCELL
    //    real Gto=0.294;
    ///#endif
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;
    // Elnaz's fitted parameter set: overrides the default conductances and
    // fluxes above (order: GNa, GbNa, GCaL, GbCa, Gto, Gkr, Gks, GK1, GpK,
    // knak, knaca, Vmaxup, GpCa, arel, crel, Vleak).
    real parameters []={14.5369194152843,0.000421161732329444,0.000123555730992675,0.000438546024943873,0.268273630830681,0.123585165023946,0.171035514336793,5.02847725301225,0.0110176202871206,1.84752137000130,1095.52052508604,0.000393152126659795,0.528629865494676,0.00975540076461500,0.00491948125354052,8.11442676720905e-05};
    GNa=parameters[0];
    GbNa=parameters[1];
    GCaL=parameters[2];
    GbCa=parameters[3];
    Gto=parameters[4];
    Gkr=parameters[5];
    Gks=parameters[6];
    GK1=parameters[7];
    GpK=parameters[8];
    knak=parameters[9];
    knaca=parameters[10];
    Vmaxup=parameters[11];
    GpCa=parameters[12];
    real arel=parameters[13];
    real crel=parameters[14];
    real Vleak=parameters[15];
    // Working variables: membrane currents, fluxes and gate targets.
    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real Irel;
    real Ileak;
    real dNai;
    real dKi;
    real dCai;
    real dCaSR;
    real A;
    //    real BufferFactorc;
    //    real BufferFactorsr;
    real SERCA;
    real Caisquare;
    real CaSRsquare;
    real CaCurrent;
    real CaSRCurrent;
    real fcaold;
    real gold;
    real Ek;
    real Ena;
    real Eks;
    real Eca;
    real CaCSQN;
    real bjsr;
    real cjsr;
    real CaBuf;
    real bc;
    real cc;
    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;
    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;
    real axr1;
    real bxr1;
    real axr2;
    real bxr2;
    real Xr1_INF;
    real Xr2_INF;
    real TAU_Xr1;
    real TAU_Xr2;
    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;
    real R_INF;
    real TAU_R;
    real S_INF;
    real TAU_S;
    real Ad;
    real Bd;
    real Cd;
    real TAU_D;
    real D_INF;
    real TAU_F;
    real F_INF;
    real FCa_INF;
    real G_INF;
    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
    //    real BufcKbufc=Bufc*Kbufc;
    //    real Kbufcsquare=Kbufc*Kbufc;
    //    real Kbufc2=2*Kbufc;
    //    real BufsrKbufsr=Bufsr*Kbufsr;
    //    const real Kbufsrsquare=Kbufsr*Kbufsr;
    //    const real Kbufsr2=2*Kbufsr;
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);
    real sItot;
    //Needed to compute currents: Nernst/reversal potentials
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));
    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
         (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);
    //Determine total current
    (sItot) = IKr    +
              IKs   +
              IK1   +
              Ito   +
              INa   +
              IbNa  +
              ICaL  +
              IbCa  +
              INaK  +
              INaCa +
              IpCa  +
              IpK   +
              stim_current;
    //update concentrations
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    ///A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
    A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
    Irel=A*sd*sg;
    ///Ileak=0.00008f*(CaSR-Cai);
    Ileak=Vleak*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    // SR calcium with calsequestrin buffering: solve the buffering quadratic
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    // cytosolic calcium with buffering: same quadratic scheme
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;
    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;
    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;
    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;
    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;
    // Ito gates depend on the cell type selected at compile time.
    #ifdef EPI
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
    #endif
    #ifdef ENDO
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+28)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
    #endif
    #ifdef MCELL
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
    #endif
    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));
    //Update gates (exponential integration toward steady state)
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
    // fCa and g gates may only decrease while depolarized (> -37 mV)
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;
    //update voltage (explicit Euler on the total ionic current)
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}
|
GxB_UnaryOp_xtype_name.c | //------------------------------------------------------------------------------
// GxB_UnaryOp_xtype_name: return the type_name of x for z=f(x)
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
#include "GB.h"
// Copies the name of the x-type (input type) of a unary operator z=f(x)
// into the caller-owned buffer type_name.
GrB_Info GxB_UnaryOp_xtype_name     // return the name of the type of x
(
    char *type_name,        // name of the type (char array of size at least
                            // GxB_MAX_NAME_LEN, owned by the user application).
    const GrB_UnaryOp unaryop
)
{
    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------
    GB_WHERE1 ("GxB_UnaryOp_xtype_name (type_name, op)") ;
    GB_RETURN_IF_NULL (type_name) ;
    GB_RETURN_IF_NULL_OR_FAULTY (unaryop) ;
    ASSERT_UNARYOP_OK (unaryop, "unaryop for xtype_name", GB0) ;
    //--------------------------------------------------------------------------
    // get the type_name
    //--------------------------------------------------------------------------
    // fixed-size copy: type names are stored in GxB_MAX_NAME_LEN buffers
    memcpy (type_name, unaryop->xtype->name, GxB_MAX_NAME_LEN) ;
    // make the result visible to all threads before returning
    #pragma omp flush
    return (GrB_SUCCESS) ;
}
|
morn_image_resize.c | /*
Copyright (C) 2019-2020 JingWeiZhangHuai <jingweizhanghuai@163.com>
Licensed under the Apache License, Version 2.0; you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "morn_image.h"
// Cached state for mBinaryImageResize(): the nearest-neighbour source
// coordinate tables are rebuilt only when the target geometry or resize
// type changes.
struct HandleBinaryImageResize
{
    int height;   // target height the tables were last built for
    int width;    // target width the tables were last built for
    int type;     // resize type the tables were last built for
    int *lx;      // per-destination-column source x index
    int *ly;      // per-destination-row source y index
};
#define HASH_BinaryImageResize 0x3b7f3813
// Handle destructor: releases the cached coordinate tables, if any.
void endBinaryImageResize(void *info)
{
    struct HandleBinaryImageResize *handle = (struct HandleBinaryImageResize *)info;
    if(handle->ly != NULL) mFree(handle->ly);
    if(handle->lx != NULL) mFree(handle->lx);
}
// Nearest-neighbour image resize. Per-row/per-column source index tables
// are cached on the source image handle and rebuilt only when the target
// geometry changes. If dst is NULL or equals src, the result replaces src.
// Either height or width may be <= 0 for uniform types, in which case it is
// derived from the source aspect ratio.
void mBinaryImageResize(MImage *src,MImage *dst,int height,int width,int type)
{
    mException(INVALID_IMAGE(src),EXIT,"invalid input");
    type = type|0xFC; // presumably normalizes the type flags to the MORN_RESIZE_* range -- TODO confirm
    MImage *p=dst;
    if(INVALID_POINTER(dst)||(dst==src))
    {
        // no usable destination: derive missing dimension, render into a temp
        if((height>0)&&(width>0));
        else if((height<=0)&&(type != MORN_RESIZE_UNUNIFORM))
            height = (src->height)*width/(src->width);
        else if((width<=0)&&(type != MORN_RESIZE_UNUNIFORM))
            width = (src->width)*height/(src->height);
        else
            mException(1,EXIT,"invalid input");
        dst = mImageCreate(src->channel,height,width,NULL);
    }
    else
    {
        // destination given: fall back to its geometry, then resize it
        if(height <= 0) height = dst->height;
        if(width <= 0) width  = dst->width;
        if((height>0)&&(width>0));
        else if((height<=0)&&(type != MORN_RESIZE_UNUNIFORM))
            height = (src->height)*width/(src->width);
        else if((width<=0)&&(type != MORN_RESIZE_UNUNIFORM))
            width = (src->width)*height/(src->height);
        else
            mException(1,EXIT,"invalid input");
        mImageRedefine(dst,src->channel,height,width,dst->data);
    }
    MHandle *hdl=mHandle(src,BinaryImageResize);
    struct HandleBinaryImageResize *handle = (struct HandleBinaryImageResize *)hdl->handle;
    if((hdl->valid == 0)||(handle->height != height)||(handle->width != width)||(handle->type != type))
    {
        // (re)build the source coordinate tables for this target geometry
        float kx = ((float)(src->width ))/((float)width );
        float ky = ((float)(src->height))/((float)height);
        if(type == MORN_RESIZE_MINUNIFORM)      {kx = MIN(kx,ky); ky = kx;}
        else if(type == MORN_RESIZE_MAXUNIFORM) {kx = MAX(kx,ky); ky = kx;}
        // scaling is centered: both images share their center point
        float scx = ((float)(src->width))/2.0f; float scy = ((float)(src->height))/2.0f;
        float dcx = ((float)     width  )/2.0f; float dcy = ((float)     height )/2.0f;
        handle->type = type;
        if(handle->width <width)
        {
            if(handle->lx != NULL) mFree(handle->lx);
            handle->lx = (int *)mMalloc(width * sizeof(int));
        }
        handle->width = width;
        if(handle->height < height)
        {
            if(handle->ly != NULL) mFree(handle->ly);
            handle->ly = (int *)mMalloc(height * sizeof(int));
        }
        handle->height = height;
        for(int i=0;i<width;i++)
        {
            float x = ((float)i-dcx)*kx+scx;
            handle->lx[i] = (int)(x+0.5); // round to the nearest source column
        }
        for(int j=0;j<height;j++)
        {
            float y = ((float)j-dcy)*ky+scy;
            handle->ly[j] = (int)(y+0.5); // round to the nearest source row
        }
    }
    int *lx = handle->lx;
    int *ly = handle->ly;
    int j;
    // rows are independent -> parallel copy through the lookup tables
    #pragma omp parallel for
    for(j=0;j<height;j++)
        for(int i=0;i<width;i++)
            for(int cn=0;cn<src->channel;cn++)
                dst->data[cn][j][i] =src->data[cn][ly[j]][lx[i]];
    memcpy(&(dst->info),&(src->info),sizeof(MInfo));
    if(p!=dst)
    {
        // caller wanted in-place: swap the temp into src and drop it
        mImageExchange(src,dst);
        mImageRelease(dst);
    }
    hdl->valid = 1;
}
// Cached state for m_ImageResize(): bilinear source coordinates plus
// fixed-point (0..128) interpolation weights, rebuilt only when the target
// geometry or resize type changes.
struct HandleImageResize
{
    int height;          // target height the tables were last built for
    int width;           // target width the tables were last built for
    int type;            // resize type the tables were last built for
    int *lx;             // per-destination-column left source x index
    unsigned char *wx;   // weight of the left column (128 - fractional part * 128)
    int *ly;             // per-destination-row top source y index
    unsigned char *wy;   // weight of the top row (128 - fractional part * 128)
};
#define HASH_ImageResize 0x56db84c
// Handle destructor: releases the cached coordinate and weight tables.
void endImageResize(void *info)
{
    struct HandleImageResize *handle = (struct HandleImageResize *)info;
    if(handle->wy != NULL) mFree(handle->wy);
    if(handle->ly != NULL) mFree(handle->ly);
    if(handle->wx != NULL) mFree(handle->wx);
    if(handle->lx != NULL) mFree(handle->lx);
}
// Bilinear image resize with 7-bit fixed-point weights (0..128 per axis,
// hence the final /16384 = 128*128 normalization). Nearest-neighbour
// requests are delegated to mBinaryImageResize(). Coordinate/weight tables
// are cached on the source image handle. If dst is NULL or equals src, the
// result replaces src.
void m_ImageResize(MImage *src,MImage *dst,int height,int width,int type)
{
    mException(INVALID_IMAGE(src),EXIT,"invalid input");
    // nearest-neighbour fast path
    if((type|MORN_NEAREST)==MORN_NEAREST) {mBinaryImageResize(src,dst,height,width,type);return;}
    MImage *p=dst;
    if(INVALID_POINTER(dst)||(dst==src))
    {
        // no usable destination: derive missing dimension, render into a temp
        if((height>0)&&(width>0));
        else if((height<=0)&&(type != MORN_RESIZE_UNUNIFORM))
            height = (src->height)*width/(src->width);
        else if((width<=0)&&(type != MORN_RESIZE_UNUNIFORM))
            width = (src->width)*height/(src->height);
        else
            mException(1,EXIT,"invalid input");
        dst = mImageCreate(src->channel,height,width,NULL);
    }
    else
    {
        // destination given: fall back to its geometry, then resize it
        if(height <= 0) height = dst->height;
        if(width <= 0) width = dst->width;
        if((height>0)&&(width>0));
        else if((height<=0)&&(type != MORN_RESIZE_UNUNIFORM))
            height = (src->height)*width/(src->width);
        else if((width<=0)&&(type != MORN_RESIZE_UNUNIFORM))
            width = (src->width)*height/(src->height);
        else
            mException(1,EXIT,"invalid input");
        mImageRedefine(dst,src->channel,height,width,dst->data);
    }
    MHandle *hdl=mHandle(src,ImageResize);
    struct HandleImageResize *handle = (struct HandleImageResize *)(hdl->handle);
    if((hdl->valid == 0)||(handle->height != height)||(handle->width != width)||(handle->type != type))
    {
        // (re)build the bilinear coordinate and weight tables
        float kx = ((float)(src->width ))/((float)width );
        float ky = ((float)(src->height))/((float)height);
        if(type == MORN_RESIZE_MINUNIFORM)      {kx = MIN(kx,ky); ky = kx;}
        else if(type == MORN_RESIZE_MAXUNIFORM) {kx = MAX(kx,ky); ky = kx;}
        // scaling is centered: both images share their center point
        float scx = ((float)(src->width))/2.0f; float scy = ((float)(src->height))/2.0f;
        float dcx = ((float)     width  )/2.0f; float dcy = ((float)     height )/2.0f;
        handle->type = type;
        if(handle->width <width)
        {
            if(handle->lx != NULL) mFree(handle->lx);
            if(handle->wx != NULL) mFree(handle->wx);
            handle->lx = (          int *)mMalloc(width * sizeof(int));
            handle->wx = (unsigned char *)mMalloc(width * sizeof(unsigned char));
        }
        handle->width = width;
        if(handle->height < height)
        {
            if(handle->ly != NULL) mFree(handle->ly);
            if(handle->wy != NULL) mFree(handle->wy);
            handle->ly = (          int *)mMalloc(height * sizeof(int));
            handle->wy = (unsigned char *)mMalloc(height * sizeof(unsigned char));
        }
        handle->height = height;
        for(int i=0;i<width;i++)
        {
            float x = ((float)i-dcx)*kx+scx;
            handle->lx[i] = floor(x);
            // weight of the left neighbour, in 1/128 units
            handle->wx[i] = 128 - (unsigned char)((x-(float)(handle->lx[i]))*128.0f);
        }
        for(int j=0;j<height;j++)
        {
            float y = ((float)j-dcy)*ky+scy;
            handle->ly[j] = floor(y);
            // weight of the top neighbour, in 1/128 units
            handle->wy[j] = 128 - (unsigned char)((y-(float)(handle->ly[j]))*128.0f);
        }
    }
    int *lx = handle->lx; unsigned char *wx = handle->wx;
    int *ly = handle->ly; unsigned char *wy = handle->wy;
    int j;
    // rows are independent -> parallel interpolation
    #pragma omp parallel for
    // for(j=0;j<height;j++)for(int i=0;i<width;i++)
    for(j=ImageY1(dst);j<ImageY2(dst);j++)for(int i=ImageX1(dst,j);i<ImageX2(dst,j);i++)
    {
        if((lx[i]<0)||(lx[i]>src->width-1)||(ly[j]<0)||(ly[j]>src->height-1))
        {
            // source sample outside the image: write zeros
            for(int cn=0;cn<src->channel;cn++)
                dst->data[cn][j][i] = 0.0f;
        }
        else
        {
            // 2x2 neighbourhood, weights sum to 128 on each axis
            int x1 = lx[i];int x2 = x1+1;
            int y1 = ly[j];int y2 = y1+1;
            unsigned char wx1 = wx[i];unsigned char wx2 = 128-wx1;
            unsigned char wy1 = wy[j];unsigned char wy2 = 128-wy1;
            for(int cn=0;cn<src->channel;cn++)
            {
                dst->data[cn][j][i] =((src->data[cn][y1][x1]*wx1+src->data[cn][y1][x2]*wx2)*wy1
                                     +(src->data[cn][y2][x1]*wx1+src->data[cn][y2][x2]*wx2)*wy2)/16384;
            }
        }
    }
    memcpy(&(dst->info),&(src->info),sizeof(MInfo));
    if(p!=dst)
    {
        // caller wanted in-place: swap the temp into src and drop it
        mImageExchange(src,dst);
        mImageRelease(dst);
    }
    hdl->valid = 1;
}
|
GB_unaryop__ainv_int64_uint32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_int64_uint32
// op(A') function: GB_tran__ainv_int64_uint32
// C type: int64_t
// A type: uint32_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, aij) \
int64_t z = (int64_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_INT64 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the AINV (additive inverse) unary operator entrywise to a dense
// array: Cx [p] = -((int64_t) Ax [p]) for p = 0..anz-1, via the GB_* macros
// defined above (GB_GETA / GB_CASTING / GB_OP).
GrB_Info GB_unop__ainv_int64_uint32
(
    int64_t *Cx,        // output array, type int64_t; Cx and Ax may be aliased
    uint32_t *Ax,       // input array, type uint32_t
    int64_t anz,        // number of entries to process
    int nthreads        // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    // this specialized kernel was disabled at compile time (GB_control.h);
    // the caller falls back to the generic worker
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // embarrassingly parallel: each entry is independent
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // aij = Ax [p] ; z = (int64_t) aij ; Cx [p] = -z
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast uint32_t -> int64_t, and apply
// AINV (z = -x). The actual work is done by the shared template
// GB_unaryop_transpose.c, which is specialized by the GB_* macros above.
GrB_Info GB_tran__ainv_int64_uint32
(
    GrB_Matrix C,                           // output matrix (int64_t)
    const GrB_Matrix A,                     // input matrix (uint32_t)
    int64_t *GB_RESTRICT *Rowcounts,        // per-slice row counts from phase 1
    GBI_single_iterator Iter,               // iterator over the slices of A
    const int64_t *GB_RESTRICT A_slice,     // slice boundaries of A
    int naslice                             // number of slices
)
{
    #if GB_DISABLE
    // kernel disabled at compile time; caller uses the generic path
    return (GrB_NO_VALUE) ;
    #else
    // phase 2 fills C using the Rowcounts computed in phase 1
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
gpufont_ttf_file.c | #include <stdlib.h>
#include <stdio.h>
#include <netinet/in.h> /* ntohl */
#include <string.h> /* memset */
#include <stdio.h>
#include "gpufont_data.h"
#include "gpufont_ttf_file.h"
#include "ttf_defs.h"
#include "triangulate.h"
#pragma pack(1)
typedef int8_t int8;
typedef int16_t int16;
typedef int32_t int32;
typedef uint8_t uint8;
typedef uint16_t uint16;
typedef uint32_t uint32;
typedef unsigned uint;
enum {
DEBUG_DUMP = 0, /* enable/disable level 1 debug messages */
DEBUG_DUMP2 = 0, /* enable/disable level 2 debug messages */
ENABLE_OPENMP = 1 /* used for triangulating glyphs. huge speed boost for CJK fonts */
};
/* todo:
- metrics
- support more cmap formats
- proper TTC support
*/
/* Reads `count` big-endian 16-bit values from fp into x[], converting each to
host byte order with ntohs(). Returns 0 on success, 1 on a short read
(x[] contents are then unspecified). */
static int read_shorts( FILE *fp, uint16_t x[], uint32_t count )
{
	size_t got = fread( x, 2, count, fp );
	if ( got != count )
		return 1;
	while( count-- )
		x[count] = ntohs( x[count] );
	return 0;
}
#if ENABLE_COMPOSITE_GLYPHS
/* Used by read_glyph */
/* Used by read_glyph.
Reads one composite glyph: a sequence of subglyph records, each consisting of
a glyph index, positioning arguments and an optional 2x2 transform.
Returns a heap-allocated CompositeGlyph blob laid out as
  [ size_t num_subglyphs ][ GlyphIndex indices[] ][ 6 floats per subglyph: 4 matrix + 2 offset ]
or NULL on failure, with *status explaining why.
Fix vs. previous revision: a failed realloc() no longer leaks the old block
(the result used to be assigned straight to glyph_data before the NULL check). */
static void *read_composite_glyph( FILE *fp, float units_per_em, Font font[1], FontStatus status[1] )
{
	/* SubGlyphHeader */
	struct {
		uint16 flags, glyph_index;
	} sgh;
	uint16 num = 0; /* index of the subglyph currently being read */
	void *glyph_data = NULL; /* points to a CompositeGlyph, whose size is not yet known */
	/* num_subglyphs begins as zero because of calloc */
	glyph_data = calloc( 1, sizeof(size_t) );
	if ( !glyph_data ) {
		*status = F_FAIL_ALLOC;
		return NULL;
	}
	do {
		int16 args[2];
		/* These pointers alias glyph_data */
		size_t *num_subglyphs;
		GlyphIndex *sg_indices;
		float *sg_matrix;
		float *sg_offset;
		int16 fixed_matrix[4];
		void *bigger;
		int e;
		num_subglyphs = glyph_data;
		*num_subglyphs += 1;
		/* grow through a temporary so the old block survives a failed realloc */
		bigger = realloc( glyph_data, COMPOSITE_GLYPH_SIZE( *num_subglyphs ) );
		if ( !bigger ) {
			*status = F_FAIL_ALLOC;
			goto error_handler;
		}
		glyph_data = bigger;
		num_subglyphs = glyph_data;
		/* indices come right after the count; the matrix/offset area follows
		the index array. NOTE(review): the matrix area's base moves every time
		the index array grows, but earlier matrices are not relocated —
		confirm COMPOSITE_GLYPH_SIZE's layout matches this addressing. */
		sg_indices = (GlyphIndex*)( num_subglyphs + 1 );
		sg_matrix = (float*)( sg_indices + *num_subglyphs ) + num * 6;
		sg_offset = sg_matrix + 4;
		if ( read_shorts( fp, &sgh.flags, 2 ) ) {
			*status = F_FAIL_EOF;
			goto error_handler;
		}
		/* redirect out-of-range references to glyph 0 (.notdef) */
		if ( sgh.glyph_index >= font->num_glyphs )
			sgh.glyph_index = 0;
		sg_indices[ num ] = sgh.glyph_index;
		if ( sgh.flags & COM_ARGS_ARE_WORDS ) {
			/* 16-bit args */
			if ( read_shorts( fp, (uint16*) args, 2 ) ) {
				*status = F_FAIL_EOF;
				goto error_handler;
			}
		} else {
			/* 8-bit args, sign-extended */
			int8 temp[2];
			if ( fread( temp, 1, 2, fp ) != 2 ) {
				*status = F_FAIL_EOF;
				goto error_handler;
			}
			args[0] = temp[0];
			args[1] = temp[1];
		}
		sg_offset[0] = 0;
		sg_offset[1] = 0;
		if ( sgh.flags & COM_ARGS_ARE_XY_VALUES ) {
			/* args are an offset vector, in font units -> em units */
			sg_offset[0] = args[0] / units_per_em;
			sg_offset[1] = args[1] / units_per_em;
		} else {
			/* args are point indices
			todo:
			1. find the GlyphTriangles that corresponds to glyph_index
			2. get coordinates of the relevant points
			Either arg1 or arg2 is presumably a point index of the subglyph.
			But the composite glyph has no points of it's own, so what does the other argument refer to???
			*/
			(void) font;
			printf( "todo: match points\n" );
		}
		/* default transform: identity (values are 2.14 fixed point) */
		fixed_matrix[0] = 1;
		fixed_matrix[1] = 0;
		fixed_matrix[2] = 0;
		fixed_matrix[3] = 1;
		/* FreeType has "else" between these 3 ifs
		But what if all 3 bits are set? */
		if ( sgh.flags & COM_HAVE_A_SCALE )
		{
			/* one scale applied to both axes */
			if ( read_shorts( fp, (uint16*) fixed_matrix, 1 ) ) {
				*status = F_FAIL_EOF;
				goto error_handler;
			}
			fixed_matrix[3] = fixed_matrix[0];
		}
		else if ( sgh.flags & COM_HAVE_X_AND_Y_SCALE )
		{
			/* independent x and y scales on the diagonal */
			int16 temp[2];
			if ( read_shorts( fp, (uint16*) temp, 2 ) ) {
				*status = F_FAIL_EOF;
				goto error_handler;
			}
			fixed_matrix[0] = temp[0];
			fixed_matrix[3] = temp[1];
		}
		else if ( sgh.flags & COM_HAVE_MATRIX )
		{
			/* full 2x2 matrix */
			if ( read_shorts( fp, (uint16*) fixed_matrix, 4 ) ) {
				*status = F_FAIL_EOF;
				goto error_handler;
			}
		}
		/* convert 2.14 fixed point to float */
		for( e=0; e<4; e++ )
			sg_matrix[e] = fixed_matrix[e] / (float) ( 1 << 14 );
		num++;
	} while( sgh.flags & 0x20 ); /* 0x20 = MORE_COMPONENTS */
	*status = F_SUCCESS;
	return glyph_data;
error_handler:;
	free( glyph_data );
	return NULL;
}
#endif
/* Decodes one glyph outline coordinate and accumulates it into *co.
`flags` carries the x-axis flag bits (callers shift y flags down first):
- PT_SHORT_X set: the delta is one unsigned byte and PT_SAME_X is its sign
  (set = positive, clear = negative).
- PT_SHORT_X clear, PT_SAME_X set: repeat the previous coordinate (no bytes).
- both clear: the delta is a signed big-endian 16-bit value.
Returns 1 on success, 0 on read failure. */
static int read_contour_coord( FILE *fp, PointFlag flags, int32 co[1] )
{
	if ( flags & PT_SHORT_X )
	{
		uint8 delta;
		if ( fread( &delta, 1, 1, fp ) != 1 )
			return 0;
		*co += ( flags & PT_SAME_X ) ? (int32) delta : -(int32) delta;
		return 1;
	}
	if ( flags & PT_SAME_X )
		return 1; /* same as previous coordinate; nothing to read */
	{
		int16 delta;
		if ( read_shorts( fp, (uint16*) &delta, 1 ) )
			return 0;
		*co += delta;
	}
	return 1;
}
/* Used by read_glyph */
/* Used by read_glyph.
Reads the outline of a simple (non-composite) glyph: contour end-point
indices, per-point flags, then delta-encoded X and Y coordinates.
Returns a SimpleGlyph (glyph->num_parts == 0 marks it as simple) or NULL with
*status set. Fix vs. previous revision: num_contours == 0 used to index
end_points[-1]; such a glyph is now treated as an empty outline. */
static SimpleGlyph *read_simple_glyph( FILE *fp, uint16 num_contours, FontStatus status[1] )
{
	SimpleGlyph *glyph = NULL;
	uint16 *end_points = NULL;
	PointCoord *final_points = NULL;
	uint32 num_points;
	uint32 n;
	int32 prev_coord;
	PointFlag *final_flags = NULL;
	uint16 num_instr;
	if ( DEBUG_DUMP2 ) {
		printf( "Reading contour data...\n" );
	}
	if ( num_contours == 0 ) {
		/* a contour-less glyph has no outline; leave it NULL like the
		empty glyphs skipped via the loca table */
		*status = F_SUCCESS;
		return NULL;
	}
	end_points = calloc( num_contours, 2 );
	if ( !end_points ) {
		*status = F_FAIL_ALLOC;
		return NULL;
	}
	if ( read_shorts( fp, end_points, num_contours ) ) {
		*status = F_FAIL_EOF;
		goto error_handler;
	}
	/* the last contour's end point index determines the point count */
	num_points = end_points[ num_contours - 1 ] + 1;
	if ( num_points > MAX_GLYPH_POINTS ) {
		if ( DEBUG_DUMP ) {
			printf( "MAX_GLYPH_POINTS too small (need %u)\n", (uint) num_points );
		}
		*status = F_FAIL_BUFFER_LIMIT;
		goto error_handler;
	}
	/* fixed-size scratch buffers; ownership moves into the glyph on success */
	final_flags = malloc( MAX_GLYPH_POINTS * sizeof( PointFlag ) );
	final_points = malloc( MAX_GLYPH_POINTS * sizeof( PointCoord ) * 2 );
	if ( !final_points || !final_flags ) {
		*status = F_FAIL_ALLOC;
		goto error_handler;
	}
	/* Skip the hinting instructions (not used by this renderer) */
	if ( read_shorts( fp, &num_instr, 1 ) ) {
		*status = F_FAIL_EOF;
		goto error_handler;
	}
	if ( fseek( fp, num_instr, SEEK_CUR ) < 0 ) {
		*status = F_FAIL_CORRUPT;
		goto error_handler;
	}
	/* Expand the run-length encoded flags array */
	n = 0;
	while( n < num_points )
	{
		uint32 end, count=1;
		uint8 flags;
		if ( fread( &flags, 1, 1, fp ) != 1 ) {
			*status = F_FAIL_EOF;
			goto error_handler;
		}
		if ( flags & PT_SAME_FLAGS )
		{
			/* this flag byte repeats `repeat` additional times */
			uint8 repeat;
			if ( fread( &repeat, 1, 1, fp ) != 1 ) {
				*status = F_FAIL_EOF;
				goto error_handler;
			}
			count += repeat;
		}
		end = n + count;
		if ( end > num_points ) {
			/* more flags than points */
			*status = F_FAIL_CORRUPT;
			goto error_handler;
		}
		while( n < end )
			final_flags[n++] = flags;
	}
	if ( n != num_points ) {
		/* less flags than points */
		*status = F_FAIL_CORRUPT;
		goto error_handler;
	}
	/* Read delta-encoded coordinates. First all X, then all Y.
	Y uses the same decoder with the flag byte shifted so the y-axis bits
	line up with the x-axis positions. */
	*status = F_FAIL_EOF;
	for( prev_coord=n=0; n<num_points; n++ ) {
		int32 x = prev_coord;
		if ( !read_contour_coord( fp, final_flags[n], &x ) )
			goto error_handler;
		final_points[2*n] = prev_coord = x;
	}
	for( prev_coord=n=0; n<num_points; n++ ) {
		int32 y = prev_coord;
		if ( !read_contour_coord( fp, final_flags[n]>>1, &y ) )
			goto error_handler;
		final_points[2*n+1] = prev_coord = y;
		final_flags[n] &= PT_ON_CURVE; /* discard all flags except the one that matters */
	}
	glyph = calloc( 1, sizeof( SimpleGlyph ) );
	/* calloc zeroes glyph->num_parts, which is important: zero marks the
	glyph as simple (not composite) */
	if ( !glyph ) {
		*status = F_FAIL_ALLOC;
	} else {
		glyph->tris.num_points_orig = num_points;
		glyph->tris.end_points = end_points;
		glyph->tris.points = final_points;
		glyph->tris.flags = final_flags;
		glyph->tris.num_contours = num_contours;
		/* these three must not be free'd since they are in use: */
		end_points = NULL;
		final_points = NULL;
		final_flags = NULL;
		if ( DEBUG_DUMP2 )
			printf( "Glyph read succesfully\n" );
		*status = F_SUCCESS;
	}
error_handler:;
	if ( final_points ) free( final_points );
	if ( final_flags ) free( final_flags );
	if ( end_points ) free( end_points );
	return glyph;
}
/* Reads one glyph (simple or composite) located at glyph_file_pos and stores
it in font->glyphs[glyph_index]. glyph_counts[0] / glyph_counts[1] count
successfully read simple / composite glyphs.
Fixes vs. previous revision:
- numberOfContours is a signed field; a NEGATIVE value (>= 0x8000 as uint16)
  marks a composite glyph. The old test (>= 0x1000) also misclassified any
  simple glyph with 4096+ contours.
- the composite branch referenced an undeclared `units_per_em`; it now uses
  font->units_per_em (set by read_offset_table before glyphs are read). */
static FontStatus read_glyph( FILE *fp, Font font[1], uint32 glyph_index, uint32 glyph_file_pos, unsigned glyph_counts[2] )
{
	/* GlyphHeader */
	struct {
		uint16 num_contours;
		int16 xmin, ymin, xmax, ymax;
	} header;
	FontStatus status = F_FAIL_IMPOSSIBLE;
	if ( fseek( fp, glyph_file_pos, SEEK_SET ) < 0 )
		return F_FAIL_CORRUPT;
	/* reads num_contours plus the (currently unused) bounding box */
	if ( read_shorts( fp, &header.num_contours, 5 ) )
		return F_FAIL_EOF;
	if ( header.num_contours >= 0x8000 )
	{
		/* negative contour count: composite glyph */
#if ENABLE_COMPOSITE_GLYPHS
		font->glyphs[ glyph_index ] = read_composite_glyph( fp, (float) font->units_per_em, font, &status );
		glyph_counts[1] += ( status == F_SUCCESS );
		if ( DEBUG_DUMP2 && font->glyphs[ glyph_index ] ) {
			printf( "Glyph %u is a composite glyph. Has %u components\n", (uint) glyph_index, (uint) font->glyphs[ glyph_index ]->num_parts );
		}
#else
		/* composite support compiled out: leave the glyph NULL but succeed */
		status = F_SUCCESS;
#endif
	}
	else
	{
		font->glyphs[ glyph_index ] = read_simple_glyph( fp, header.num_contours, &status );
		glyph_counts[0] += ( status == F_SUCCESS );
	}
	return status;
}
/* Reads both 'loca' and 'glyf' tables */
/* Reads both the 'loca' (glyph location) and 'glyf' (glyph outline) tables.
format 0 means 16-bit offsets (stored divided by 2), nonzero means 32-bit.
Per the TrueType spec the loca table holds num_glyphs+1 entries and glyph n
has no outline exactly when loca[n] == loca[n+1]; the previous revision read
only num_glyphs entries and compared each offset against the previously READ
one (seeded from loca[1]), which skipped/kept the wrong glyphs. */
static FontStatus read_all_glyphs( FILE *fp, Font font[1], int16 format, uint32 glyph_base_offset )
{
	void *loca_p;
	uint32 n = 0;
	FontStatus status;
	unsigned glyph_counts[2] = {0,0}; /* [0]=simple, [1]=composite */
	if ( DEBUG_DUMP )
		printf( "loca format %u (%s)\n", format, format ? "32-bit" : "16-bit" );
	if ( format == 0 )
	{
		/* 16-bit glyph location table; offsets are stored divided by 2 */
		uint16 *loca;
		loca = loca_p = calloc( font->num_glyphs + 1, 2 );
		if ( !loca )
			return F_FAIL_ALLOC;
		if ( read_shorts( fp, loca, font->num_glyphs + 1 ) )
			status = F_FAIL_EOF;
		else
		{
			/* NOTE(review): INCOMPLETE is also returned when every glyph is
			empty — confirm that is the intended sentinel */
			status = F_FAIL_INCOMPLETE;
			for( n=0; n<font->num_glyphs; n++ )
			{
				if ( loca[n] == loca[n+1] ) {
					/* This glyph has no outline and can be left as NULL */
					continue;
				}
				if ( DEBUG_DUMP2 )
					printf( "Reading glyph %u out of %u\n", (uint) n, (uint) font->num_glyphs );
				status = read_glyph( fp, font, n, (uint32) loca[n] * 2 + glyph_base_offset, glyph_counts );
				if ( status != F_SUCCESS )
					break;
			}
		}
	}
	else
	{
		/* 32-bit glyph location table. Entries are kept in file (big-endian)
		order: equality tests work either way, and ntohl is applied only when
		computing the seek offset. */
		uint32 *loca;
		loca = loca_p = calloc( font->num_glyphs + 1, 4 );
		if ( !loca )
			return F_FAIL_ALLOC;
		if ( fread( loca, 4, font->num_glyphs + 1, fp ) != font->num_glyphs + 1 ) {
			status = F_FAIL_EOF;
		} else {
			status = F_FAIL_INCOMPLETE;
			for( n=0; n<font->num_glyphs; n++ )
			{
				if ( loca[n] == loca[n+1] )
					continue; /* empty glyph */
				if ( DEBUG_DUMP2 )
					printf( "Reading glyph %u out of %u\n", (uint) n, (uint) font->num_glyphs );
				status = read_glyph( fp, font, n, ntohl( loca[n] ) + glyph_base_offset, glyph_counts );
				if ( status != F_SUCCESS )
					break;
			}
		}
	}
	if ( DEBUG_DUMP )
	{
		printf( "Read %u out of %u glyphs\n"
		"Simple glyphs: %u\n"
		"Composite glyphs: %u\n",
		(uint) n,
		(uint) font->num_glyphs,
		glyph_counts[0], glyph_counts[1] );
	}
	free( loca_p );
	return status;
}
/* Reads a format 4 'cmap' subtable (segment mapping to delta values) and
fills the font's character-to-glyph map via set_cmap_entry().
total_length is the subtable length in bytes INCLUDING the format and length
fields that the caller has already consumed.
Fix vs. previous revision: whole_table was leaked on the EOF path. */
static FontStatus read_cmap_format4( FILE *fp, Font font[1], uint32 total_length )
{
	uint16 *whole_table;
	uint16 *end_codes, *start_codes, *id_range_offset;
	int16 *id_delta;
	uint16 seg_count, s;
	uint32 max_k; /* number of uint16 slots addressable through id_range_offset */
	unsigned total_indices = 0;
	unsigned n_valid = 0;
	/* because format and length have been already read */
	total_length -= 2*2;
	if ( ( whole_table = malloc( total_length ) ) == NULL )
		return F_FAIL_ALLOC;
	if ( read_shorts( fp, whole_table, total_length >> 1 ) ) {
		free( whole_table ); /* was leaked here */
		return F_FAIL_EOF;
	}
	/* layout after format+length: [0]=language, [1]=segCountX2,
	[2..4]=search hints, then endCode[], reservedPad, startCode[],
	idDelta[], idRangeOffset[] */
	seg_count = whole_table[1] >> 1;
	end_codes = whole_table + 5;
	start_codes = end_codes + seg_count + 1; /* +1 skips reservedPad */
	id_delta = (int16*) start_codes + seg_count;
	id_range_offset = start_codes + 2 * seg_count;
	max_k = total_length / 2 - ( id_range_offset - whole_table );
	if ( DEBUG_DUMP )
		printf( "Segments: %u\nmax_k=%u\n", (uint) seg_count, (uint) max_k );
	for( s=0; s<seg_count; s++ )
	{
		uint16 c, start, end, stop;
		uint16 idro;
		int16 idde;
		end = end_codes[s];
		start = start_codes[s];
		idro = id_range_offset[s];
		idde = id_delta[s];
		stop = end + 1; /* may wrap to 0 for the final 0xFFFF segment */
		total_indices += end - start + 1;
		if ( start > end ) {
			free( whole_table );
			return F_FAIL_CORRUPT;
		}
		if ( idro != 0 )
		{
			/* glyph index comes from the glyphIdArray, addressed relative to
			&idRangeOffset[s]:
			glyphIndex = idRangeOffset[ s + idRangeOffset[s]/2 + c - startCode[s] ]
			(the `+ s` term is folded into the table scan below via idro/2) */
			for( c=start; c != stop; c++ ) {
				uint16 k = idro / 2 + c - start; /* + s ??? */
				if ( k < max_k )
				{
					k = id_range_offset[k];
					if ( k != 0 )
						n_valid += set_cmap_entry( font, c, ( idde + k ) & 0xFFFF );
				}
			}
		}
		else
		{
			/* glyphIndex = idDelta[s] + c (mod 65536) for start <= c <= end */
			for( c=start; c != stop; c++ )
				n_valid += set_cmap_entry( font, c, ( idde + c ) & 0xFFFF );
		}
	}
	if ( DEBUG_DUMP )
	{
		printf( "Success (%u/%u indices set, %u/%u segs)\n", n_valid, total_indices, (uint) s, (uint) seg_count );
#if USE_BINTREE_CMAP
		printf( "Binary tree allocated length: %u\n", font->cmap.data_len );
#endif
	}
	free( whole_table );
	return F_SUCCESS;
}
/* Reads the 'cmap' table (character to glyph index mapping).
Expects fp positioned at the start of the table. Scans the subtable directory
and decodes the FIRST format 4 subtable found; other formats are skipped
(see the file's todo list). Returns F_SUCCESS when a format 4 subtable was
read, F_FAIL_INCOMPLETE when none was found, or a read/seek failure code. */
FontStatus read_cmap( FILE *fp, Font *font )
{
	struct { uint16 version, num_tables; } h;
	/* subtable offsets in the file are relative to the cmap table start */
	long cmap_header_start = ftell( fp );
	int has_read_cmap = 0;
	FontStatus status = F_FAIL_INCOMPLETE;
	if ( read_shorts( fp, &h.version, 2 ) )
		return F_FAIL_EOF;
	if ( h.version != 0 )
		return F_FAIL_UNSUP_VER;
	(void) font->cmap.data_len; /* just to make sure font->cmap is still a NibTree */
	memset( &font->cmap, 0, sizeof( font->cmap ) );
	while( h.num_tables-- )
	{
		uint32 temp[2];
		uint32 subtable_offset;
		uint32 plat_enc; /* combined platform and specific encoding */
		long next_tabh_pos;
		/* encoding record: platform+encoding IDs and the subtable offset */
		if ( fread( temp, 4, 2, fp ) != 2 )
			return F_FAIL_EOF;
		plat_enc = ntohl( temp[0] );
		subtable_offset = cmap_header_start + ntohl( temp[1] );
		/* remember where the next encoding record starts so we can jump back
		after visiting the subtable */
		next_tabh_pos = ftell( fp );
		if ( fseek( fp, subtable_offset, SEEK_SET ) < 0 )
			return F_FAIL_CORRUPT;
		else
		{
			/* NOTE(review): length is read as uint16, so subtables larger
			than 64 KiB would be truncated — fine for format 4, whose length
			field is 16-bit anyway */
			struct { uint16 format, length; } q;
			if ( read_shorts( fp, &q.format, 2 ) )
				return F_FAIL_EOF;
			if ( DEBUG_DUMP ) {
				printf( "plat_enc = %08x | platform = %u | encoding = %u | offset=%08x | format=%d | length=%d\n",
				plat_enc, plat_enc >> 16, plat_enc & 0xFFFF, subtable_offset, q.format, q.length );
			}
			/* Most common cmap formats seem to be 4 (the most common of all), 6 and 12
			So it seems reasonable to support just format 4 and nothing else */
			if ( !has_read_cmap && q.format == 4 )
			{
				status = read_cmap_format4( fp, font, q.length );
				has_read_cmap = 1;
			}
		}
		if ( fseek( fp, next_tabh_pos, SEEK_SET ) < 0 )
			return F_FAIL_IMPOSSIBLE;
	}
	return status;
}
/* Triangulates the outlines of glyphs in the inclusive index range
[first_glyph, last_glyph]. Composite and empty (NULL) glyphs are skipped.
On success each simple glyph's contour arrays are freed and replaced by its
triangle data. Returns the first triangulation error, or TR_SUCCESS.
Fix vs. previous revision: the error path returned without calling
triangulator_end(), leaking the triangulator. */
static TrError triangulate_glyphs( Font font[1], size_t first_glyph, size_t last_glyph )
{
	size_t n;
	TrError err = TR_SUCCESS;
	struct Triangulator *trg;
	trg = triangulator_begin();
	if ( !trg )
		return TR_ALLOC_FAIL;
	if ( DEBUG_DUMP )
		printf( "Triangulating glyphs [%u ... %u]\n", (uint) first_glyph, (uint) last_glyph );
	for( n=first_glyph; n<=last_glyph; n++ )
	{
		SimpleGlyph *glyph = font->glyphs[n];
		if ( glyph && IS_SIMPLE_GLYPH( glyph ))
		{
			err = triangulate_contours( trg, &glyph->tris );
			if ( err != TR_SUCCESS )
			{
				if ( DEBUG_DUMP )
					printf( "Triangulation failed. Error code = %u\n", (uint) err );
				break; /* fall through so trg is still released */
			}
			/* contour data is no longer needed once triangulated */
			free( glyph->tris.end_points );
			glyph->tris.end_points = NULL;
			glyph->tris.num_contours = 0;
		}
	}
	triangulator_end( trg );
	return err;
}
/* Triangulates every glyph in the font, splitting the work across OpenMP
threads for large (e.g. CJK) fonts.
Fixes vs. previous revision:
- the remainder batch was extended when omp_get_thread_num() == numt-1, but
  OpenMP does not guarantee which thread runs which iteration; the extension
  must be keyed on the batch index n.
- if there were more processors than glyphs, batch_size was 0 and
  `start + batch_size - 1` underflowed size_t; such cases now run serially.
- removed the unused (and mis-prototyped) omp_get_thread_num declaration. */
static TrError triangulate_all_glyphs( Font font[1] )
{
	if ( !font->num_glyphs )
		return TR_SUCCESS;
	if ( font->num_glyphs > 20 && ENABLE_OPENMP )
	{
		extern unsigned omp_get_num_procs( void );
		size_t n, numt = omp_get_num_procs();
		size_t batch_size = font->num_glyphs / numt;
		if ( batch_size > 0 )
		{
			if ( DEBUG_DUMP )
				printf( "Using %u omp threads\n", (uint) numt );
			#pragma omp parallel for
			for( n=0; n<numt; n++ )
			{
				size_t start = n * batch_size;
				size_t end = start + batch_size - 1;
				/* the last batch also takes the remainder glyphs */
				if ( n == numt - 1 )
					end = font->num_glyphs - 1;
				triangulate_glyphs( font, start, end );
			}
			/* errors ignored when using openmp */
			return TR_SUCCESS;
		}
		/* more processors than glyphs: fall through to the serial path */
	}
	return triangulate_glyphs( font, 0, font->num_glyphs - 1 );
}
/* Reads the 'hmtx' table: num_hmetrics (advanceWidth, lsb) pairs followed by
bare left-side-bearing values that reuse the last advance width. Stores one
LongHorzMetrics per glyph in font->hmetrics.
Fix vs. previous revision: the buffer is sized for num_glyphs entries but
read_shorts read 2*num_hmetrics values into it — a heap overflow whenever a
corrupt font declared num_hmetrics > num_glyphs; num_hmetrics == 0 likewise
underflowed hmetrics[num_hmetrics-1]. Both are rejected as corrupt now
(hhea requires 1 <= numberOfHMetrics <= numGlyphs). */
static FontStatus read_hmtx( FILE *fp, Font font[1], unsigned num_hmetrics )
{
	LongHorzMetrics *hmetrics = NULL;
	FontStatus status;
	/* compile-time check: LongHorzMetrics must pack to exactly 4 bytes,
	because it is filled by a bulk read below */
	int size_test[ sizeof(*hmetrics) == 4 ];
	(void) size_test;
	if ( font->num_glyphs == 0 )
		return F_SUCCESS;
	if ( num_hmetrics == 0 || num_hmetrics > font->num_glyphs )
		return F_FAIL_CORRUPT;
	hmetrics = malloc( font->num_glyphs * 4 );
	if ( !hmetrics )
		return F_FAIL_ALLOC;
	status = F_FAIL_EOF;
	/* bulk-read the full (advance, lsb) pairs; read_shorts byte-swaps them */
	if ( read_shorts( fp, &hmetrics[0].adv_width, 2 * num_hmetrics ) )
		goto error_handler;
	if ( num_hmetrics < font->num_glyphs )
	{
		/* trailing glyphs share the last advance width and store only lsb */
		uint16 last_adv_x = hmetrics[ num_hmetrics - 1 ].adv_width;
		size_t n, num_lsb, end;
		num_lsb = font->num_glyphs - num_hmetrics;
		end = num_hmetrics + num_lsb;
		status = F_FAIL_EOF;
		for( n=num_hmetrics; n<end; n++ )
		{
			int16 lsb;
			if ( fread( &lsb, 2, 1, fp ) != 1 )
				goto error_handler;
			hmetrics[n].adv_width = last_adv_x;
			hmetrics[n].lsb = ntohs( lsb );
		}
	}
	font->hmetrics = hmetrics;
	return F_SUCCESS;
error_handler:;
	free( hmetrics );
	return status;
}
/* Assumes that the file is positioned after the very first field of Offset Table (sfnt version) */
/* Reads a whole font starting from its Offset Table (table directory).
Assumes the file is positioned just AFTER the sfnt version field.
Locates the required tables, then reads them in dependency order:
head, maxp, loca+glyf, cmap, hhea, hmtx. Returns F_SUCCESS or the first
failure code encountered. */
static FontStatus read_offset_table( FILE *fp, Font font[1] )
{
	/* Indices of the tables we are interested in.
	table_pos and table_len are accessed with these */
	enum {
		TAB_HEAD=0,
		TAB_MAXP,
		TAB_LOCA,
		TAB_GLYF,
		TAB_CMAP,
		TAB_HHEA,
		TAB_HMTX,
		/*
		TAB_VHEA,
		TAB_VMTX,
		*/
		NUM_USED_TABLES
	};
	uint32 table_pos[NUM_USED_TABLES] = {0}; /* 0 = table not seen */
	uint16 n, num_tables, num_glyphs;
	HeadTable head = {0};
	MaxProTableOne maxp = {0};
	HorzHeaderTable hhea = {0};
	int status;
	if ( read_shorts( fp, &num_tables, 1 ) )
		return F_FAIL_EOF;
	/* Skip rest of the offset table header
	(searchRange, entrySelector, rangeShift) */
	if ( fseek( fp, 3*2, SEEK_CUR ) < 0 )
		return F_FAIL_EOF;
	/* Scan the table directory, remembering offsets of the tables we use */
	for( n=0; n<num_tables; n++ )
	{
		/* TableRecord */
		struct {
			uint32 tag;
			uint32 checksum;
			uint32 file_offset;
			uint32 length;
		} rec;
		int tab_num;
		if ( fread( &rec, 4, 4, fp ) != 4 )
			return F_FAIL_EOF;
		/* todo: remove this ntohl and convert the constants instead */
		switch( ntohl( rec.tag ) ) {
			case 0x68656164: tab_num = TAB_HEAD; break; /* "head" */
			case 0x6d617870: tab_num = TAB_MAXP; break; /* "maxp" */
			case 0x6c6f6361: tab_num = TAB_LOCA; break; /* "loca" */
			case 0x676c7966: tab_num = TAB_GLYF; break; /* "glyf" */
			case 0x636d6170: tab_num = TAB_CMAP; break; /* "cmap" */
			case 0x68686561: tab_num = TAB_HHEA; break; /* "hhea" */
			case 0x686d7478: tab_num = TAB_HMTX; break; /* "hmtx" */
			default:
			if ( DEBUG_DUMP )
			{
				/* todo */
				printf( "unsupported table: %.4s\n", (char*) &rec.tag );
			}
			continue;
		}
		table_pos[ tab_num ] = ntohl( rec.file_offset );
		/* table_len[ tab_num ] = ntohl( rec.length ); */
		/* todo: verify checksum */
	}
	/* All of the tables above are required */
	for( n=0; n<NUM_USED_TABLES; n++ ) {
		if ( !table_pos[n] ) {
			/* Missing important tables */
			return F_FAIL_INCOMPLETE;
		}
	}
	/* Read table: "head"
	(the 54-byte bulk read relies on #pragma pack(1) at the top of the file
	keeping HeadTable unpadded) */
	if ( fseek( fp, table_pos[TAB_HEAD], SEEK_SET ) < 0 )
		return F_FAIL_CORRUPT;
	if ( fread( &head, 54, 1, fp ) != 1 )
		return F_FAIL_EOF;
	/* the spec-mandated magicNumber field validates the table */
	if ( head.magic != htonl( 0x5F0F3CF5 ) )
		return F_FAIL_CORRUPT;
	/* Read table: "maxp" (first 6 bytes: version + numGlyphs) */
	if ( fseek( fp, table_pos[TAB_MAXP], SEEK_SET ) < 0 )
		return F_FAIL_CORRUPT;
	if ( fread( &maxp, 6, 1, fp ) != 1 )
		return F_FAIL_EOF;
	if ( maxp.version == htonl( 0x5000 ) ) {
		/* maxp version 0.5 */
	} else if ( maxp.version == htonl( 0x10000 ) ) {
		/* maxp version 1.0: also read the glyph complexity limits */
		if ( fread( &maxp.max_points, 26, 1, fp ) != 1 )
			return F_FAIL_EOF;
	} else {
		/* unsupported maxp version */
		return F_FAIL_UNSUP_VER;
	}
	num_glyphs = ntohs( maxp.num_glyphs );
	font->units_per_em = ntohs( head.units_per_em );
	if ( DEBUG_DUMP )
	{
		printf(
		"Font statistics:\n"
		"Version: %08x\n"
		"Revision: %08x\n"
		"Tables: %hu\n"
		"head / Flags: %08hx\n"
		"head / Units per EM: %u\n"
		"maxp 0.5 / Glyphs: %u\n",
		(uint) ntohl( head.version ),
		(uint) ntohl( head.font_rev ),
		(unsigned short) num_tables,
		ntohs( head.flags ),
		font->units_per_em,
		(uint) num_glyphs );
		if ( maxp.version == htonl( 0x10000 ) )
		{
			printf(
			"maxp 1.0 / Max contours %hu\n"
			"maxp 1.0 / Max points (simple glyph) %hu\n"
			"maxp 1.0 / Max contours (simple glyph) %hu\n"
			"maxp 1.0 / Max composite recursion %hu\n",
			ntohs( maxp.max_contours ),
			ntohs( maxp.max_points ),
			ntohs( maxp.max_contours ),
			ntohs( maxp.max_com_recursion ) );
		}
	}
	font->num_glyphs = num_glyphs;
	/* glyph pointer array; entries stay NULL for empty glyphs */
	if (( ( font->glyphs = calloc( num_glyphs, sizeof( font->glyphs[0] ) ) ) == NULL )) return F_FAIL_ALLOC;
	if ( fseek( fp, table_pos[TAB_LOCA], SEEK_SET ) < 0 )
		return F_FAIL_CORRUPT;
	/* Read glyph contours using tables "loca" and "glyf" */
	status = read_all_glyphs( fp, font, head.index_to_loc_format, table_pos[TAB_GLYF] );
	if ( status != F_SUCCESS )
		return status;
	/* Read table "cmap" (character to glyph index mapping) */
	if ( fseek( fp, table_pos[TAB_CMAP], SEEK_SET ) < 0 )
		return F_FAIL_CORRUPT;
	status = read_cmap( fp, font );
	if ( status != F_SUCCESS )
		return status;
	/* Read horizontal metrics header ("hhea") */
	if ( fseek( fp, table_pos[TAB_HHEA], SEEK_SET ) < 0 )
		return F_FAIL_CORRUPT;
	if ( fread( &hhea, sizeof(hhea), 1, fp ) != 1 )
		return F_FAIL_EOF;
	if ( hhea.version != htonl( 0x10000 ) )
		return F_FAIL_UNSUP_VER;
	/* only metricDataFormat 0 is defined by the spec */
	if ( hhea.metric_data_format )
		return F_FAIL_UNSUP_VER;
	font->horz_ascender = (int16) ntohs( hhea.ascender );
	font->horz_descender = (int16) ntohs( hhea.descender );
	font->horz_linegap = (int16) ntohs( hhea.linegap );
	/* Read horizontal metrics ("hmtx") */
	if ( fseek( fp, table_pos[TAB_HMTX], SEEK_SET ) < 0 )
		return F_FAIL_CORRUPT;
	status = read_hmtx( fp, font, ntohs( hhea.num_hmetrics ) );
	if ( status != F_SUCCESS )
		return status;
	/* todo:
	handle errors properly
	for each glyph:
	read location from the 'loca' table
	read glyph data
	group curves into triangles
	subdivide overlapping triangles
	generate solid triangles to fill the glyph interior
	create VBOs
	for each glyph:
	upload VBO data
	read cmap
	read hhead, hmtx (horizontal metrics)
	read vhea, vmtx (vertical metrics)
	Other useful tables:
	BASE - baseline data. Needed to mix glyphs from different scripts (e.g. some math symbols and CJK)
	GDEF, GPOS - used to change position of glyphs based on context
	GSUB - used to replace glyphs based on context
	JSTF - additional positioning crap
	post - has some interesting fields: italicAngle, underlinePosition, underlineThickness, isFixedPitch
	kern - glyph positioning. same as GPOS but less useful?
	name - font name & family name
	*/
	return F_SUCCESS;
}
/* Reads a TrueType Collection. The caller has already consumed the "ttcf"
tag; the header that follows is { version, numFonts, offsetTable[0], ... }.
Only the first font in the collection is loaded for now. */
static FontStatus read_ttc( FILE *fp, Font font[1] )
{
	uint32 h[3]; /* version, font count, offset of the first Offset Table */
	if ( fread( h, 4, 3, fp ) != 3 )
		return F_FAIL_EOF;
	/* only TTC versions 1.0 and 2.0 are recognized */
	if ( h[0] != htonl( 0x10000 ) && h[0] != htonl( 0x20000 ) )
		return F_FAIL_UNSUP_VER;
	/* a collection with zero fonts may still be "valid", but is useless */
	if ( !h[1] )
		return F_FAIL_INCOMPLETE;
	/* todo: read more than the first font.
	+4 skips the sfnt version tag, which read_offset_table expects the
	caller to have consumed. */
	if ( fseek( fp, ntohl( h[2] ) + 4, SEEK_SET ) < 0 )
		return F_FAIL_CORRUPT;
	return read_offset_table( fp, font );
}
/* Loads a TrueType (.ttf) or TrueType Collection (.ttc, first font only)
file into *font: parses the tables, triangulates every glyph outline and
merges the results into contiguous buffers. *font is zeroed first.
Returns F_SUCCESS or a FontStatus failure code; the file is always closed.
Fix vs. previous revision: the "ttcf" tag test dereferenced a string literal
as uint32 (*(uint32*)"ttcf"), which is an unaligned/strict-aliasing read;
memcmp compares the bytes portably. */
FontStatus load_ttf_file( struct Font *font, const char filename[] )
{
	FILE *fp = NULL;
	uint32 file_ident;
	FontStatus status;
	memset( font, 0, sizeof(*font) );
	fp = fopen( filename, "rb" );
	if ( !fp )
		return F_FAIL_OPEN;
	if ( fread( &file_ident, 4, 1, fp ) != 1 ) {
		status = F_FAIL_EOF;
	} else {
#define USE_SDL_TIMING 0
#if USE_SDL_TIMING
		extern uint32 SDL_GetTicks( void );
		uint32 t = SDL_GetTicks();
#endif
		if ( file_ident == htonl( 0x10000 ) ) {
			/* This is a TrueType font file (sfnt version 1.0)
			todo: handle other identifiers ("true", "typ1", "OTTO") */
			status = read_offset_table( fp, font );
		} else if ( memcmp( &file_ident, "ttcf", 4 ) == 0 ) {
			/* Is a TrueType Collection */
			status = read_ttc( fp, font );
		} else {
			/* Unsupported file format */
			status = F_FAIL_UNK_FILEF;
		}
		if ( status == F_SUCCESS )
		{
#if USE_SDL_TIMING
			t = SDL_GetTicks() - t;
			printf( "File I/O took %u milliseconds\n", (unsigned) t );
#endif
			/* convert contours to triangle meshes */
			if ( triangulate_all_glyphs( font ) != TR_SUCCESS )
				status = F_FAIL_TRIANGULATE;
			/* Merges contour points, indices and glyph data into large contiguous blocks of memory */
			if ( !merge_glyph_data( font ) )
				status = F_FAIL_ALLOC;
		}
	}
	fclose( fp );
	return status;
}
|
parallelcode.c | #include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<time.h>
#include<omp.h>
/* Fills x[0..dim-1] with uniform random samples from [a, b], in parallel.
Fix vs. previous revision: rand() keeps hidden global state and is not
thread-safe, so calling it inside an OpenMP parallel for is a data race.
Each iteration now uses its own rand_r() state derived from one serial
rand() draw mixed with the index (Knuth multiplicative hash). NOTE(review):
one rand_r draw per seed gives weaker randomness than a sequential stream —
acceptable for this Monte Carlo demo, but confirm for other uses. */
void sample_rand(const double a, const double b, const int dim, double *x) {
	const unsigned int base_seed = (unsigned int) rand(); /* serial, race-free */
	#pragma omp parallel for
	for (int i = 0; i < dim; ++i) {
		unsigned int seed = base_seed ^ ((unsigned int) i * 2654435761u);
		double tmp = ((double) rand_r(&seed)) / ((double) RAND_MAX);
		x[i] = (b - a) * tmp + a;
	}
}
/* Monte Carlo estimate of the 10-dimensional unit-ball indicator integral,
printing an intermediate estimate at every power-of-4 sample count and the
elapsed wall time.
Fixes vs. previous revision:
- `integrals` was undersized by one: estimates are recorded for every power
  of 4 <= N INCLUDING 4^0 = 1, but the count loop stopped at i > 1, causing
  an out-of-bounds write (e.g. N = 4 records integrals[1] with count == 1).
  The loop now runs while i >= 1.
- the print loop read integrals[i+1] past the end on its last row; the final
  row now compares against the full-N estimate instead.
- argv[1] is validated before use. */
int main(int argc, char** argv)
{
	double start_time = omp_get_wtime();
	int NumThreads = 4;
	omp_set_num_threads(NumThreads);
	if (argc < 2) {
		fprintf(stderr, "usage: %s N\n", argv[0]);
		return 1;
	}
	long N = atol(argv[1]); /* total number of Monte Carlo samples */
	if (N < 1) {
		fprintf(stderr, "N must be a positive integer\n");
		return 1;
	}
	srand(time(NULL));
	const int dim = 10;
	double x[dim]; /* one random sample point in [-1,1]^dim */
	/* NOTE(review): V = 4.0 is the area of [-1,1]^2; for dim = 10 the cube
	volume is 2^10 = 1024 — confirm which quantity is intended. */
	double V = 4.0, integral = 0.0, sum = 0.0;
	/* count = number of powers of 4 that are <= N (4^0 included), i.e. the
	number of intermediate estimates recorded below */
	int count = 0;
	for (long i = N; i >= 1; i = i / 4)
		count++;
	double integrals[count]; /* intermediate integral estimates */
	for (long i = 0; i < N; i++)
	{
		sample_rand(-1., 1., dim, x);
		/* f = indicator of the unit ball: 1 inside, 0 outside */
		double r2 = 0.0;
		for (int d = 0; d < dim; d++)
			r2 += x[d] * x[d];
		sum += (r2 <= 1.0) ? 1.0 : 0.0;
		/* record an estimate whenever the sample count i+1 is a power of 4 */
		int k = 1;
		for (long j = 1; j <= i + 1; j = (long) pow(4, k))
		{
			if (i + 1 == j)
				integrals[k - 1] = V * sum / N;
			k++;
		}
	}
	integral = V * sum / N;
	for (int i = 0; i < count; i++)
	{
		/* the last row has no successor; compare it to the full-N estimate.
		NOTE(review): the first column labels row i with 4^(i+1) samples, but
		integrals[i] was recorded at 4^i samples — confirm intended label. */
		double next = (i + 1 < count) ? integrals[i + 1] : integral;
		printf("%lf %e %e %e\n", pow(4, i + 1), integrals[i], fabs(integrals[i] - 0.0), fabs(integrals[i] - next));
	}
	double time = omp_get_wtime() - start_time;
	/* uncomment to print the final integral estimate */
	/* printf("final integral is:%e\n", integral); */
	printf("%lf", time);
	return 0;
}
|
array_init_2.c | // Test the handling of two loops under omp for
// watch the loop index replacement (private by default)
// and tje array outlining
/* Zeroes a heap-allocated 500x500 matrix with an OpenMP-parallel outer loop.
Fixes vs. previous revision: malloc was called without any declaration in
scope (stdlib.h is added at the top of the file), allocations were not
checked, and the matrix was never freed. */
int main(void)
{
	int i, j;
	/* 500x500 matrix stored as an array of row pointers */
	float** u = (float**) malloc( 500 * sizeof( float* ) );
	if ( u == NULL )
		return 1;
	for( i = 0; i < 500; i++ )
	{
		u[i] = (float*) malloc( 500 * sizeof( float ) );
		if ( u[i] == NULL )
			return 1;
	}
	/* i and j are the loop indices the test watches; i is private by default */
	#pragma omp parallel for
	for (i = 0; i < 500; i++)
		for (j = 0; j < 500; j++)
		{
			u[i][j] = 0.0;
		}
	/* release the rows, then the pointer array */
	for( i = 0; i < 500; i++ )
		free( u[i] );
	free( u );
	return 0;
}
|
feature.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% FFFFF EEEEE AAA TTTTT U U RRRR EEEEE %
% F E A A T U U R R E %
% FFF EEE AAAAA T U U RRRR EEE %
% F E A A T U U R R E %
% F EEEEE A A T UUU R R EEEEE %
% %
% %
% MagickCore Image Feature Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/animate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/compress.h"
#include "MagickCore/constitute.h"
#include "MagickCore/display.h"
#include "MagickCore/draw.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/feature.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/list.h"
#include "MagickCore/image-private.h"
#include "MagickCore/magic.h"
#include "MagickCore/magick.h"
#include "MagickCore/matrix.h"
#include "MagickCore/memory_.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/morphology-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/timer.h"
#include "MagickCore/utility.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C a n n y E d g e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CannyEdgeImage() uses a multi-stage algorithm to detect a wide range of
% edges in images.
%
% The format of the CannyEdgeImage method is:
%
% Image *CannyEdgeImage(const Image *image,const double radius,
% const double sigma,const double lower_percent,
% const double upper_percent,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the gaussian smoothing filter.
%
% o sigma: the sigma of the gaussian smoothing filter.
%
% o lower_percent: percentage of edge pixels in the lower threshold.
%
% o upper_percent: percentage of edge pixels in the upper threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* Per-pixel working record for the Canny edge detector, stored in the
matrix cache while gradients are computed and edges are traced. */
typedef struct _CannyInfo
{
  double
    magnitude,      /* gradient magnitude at the pixel */
    intensity;      /* gradient strength compared against the thresholds */

  int
    orientation;    /* quantized gradient direction (sector index) */

  ssize_t
    x,
    y;              /* pixel coordinates; TraceEdges reuses records at
                       (i,0) as entries of its edge-tracing worklist */
} CannyInfo;
/*
  IsAuthenticPixel() reports whether the coordinate (x,y) lies inside the
  pixel region of the given image: MagickTrue when 0 <= x < columns and
  0 <= y < rows, MagickFalse otherwise.
*/
static inline MagickBooleanType IsAuthenticPixel(const Image *image,
  const ssize_t x,const ssize_t y)
{
  MagickBooleanType
    in_columns,
    in_rows;

  in_columns=((x >= 0) && (x < (ssize_t) image->columns)) ? MagickTrue :
    MagickFalse;
  in_rows=((y >= 0) && (y < (ssize_t) image->rows)) ? MagickTrue :
    MagickFalse;
  if ((in_columns == MagickFalse) || (in_rows == MagickFalse))
    return(MagickFalse);
  return(MagickTrue);
}
/*
  TraceEdges() performs the hysteresis step of the Canny detector.  The seed
  pixel (x,y) is marked as an edge in edge_image, then edges are grown by a
  flood-fill: any 8-connected neighbor that is not yet marked and whose
  suppressed gradient intensity is at least lower_threshold is marked too and
  queued for further expansion.  Returns MagickTrue on success, MagickFalse
  on any pixel-cache or matrix access failure.

  NOTE(review): the pending-pixel queue is stored in row 0 of canny_cache
  itself (elements (i,0)), overwriting gradient entries in that row; the
  bounds checks inside Get/SetMatrixElement implicitly cap the queue length
  at the matrix width -- confirm against the matrix implementation.
*/
static MagickBooleanType TraceEdges(Image *edge_image,CacheView *edge_view,
  MatrixInfo *canny_cache,const ssize_t x,const ssize_t y,
  const double lower_threshold,ExceptionInfo *exception)
{
  CannyInfo
    edge,
    pixel;
  MagickBooleanType
    status;
  register Quantum
    *q;
  register ssize_t
    i;
  /*
    Mark the seed pixel as an edge.
  */
  q=GetCacheViewAuthenticPixels(edge_view,x,y,1,1,exception);
  if (q == (Quantum *) NULL)
    return(MagickFalse);
  *q=QuantumRange;
  status=SyncCacheViewAuthenticPixels(edge_view,exception);
  if (status == MagickFalse)
    return(MagickFalse);
  /*
    Seed the work queue (matrix row 0) with the seed coordinates.
  */
  if (GetMatrixElement(canny_cache,0,0,&edge) == MagickFalse)
    return(MagickFalse);
  edge.x=x;
  edge.y=y;
  if (SetMatrixElement(canny_cache,0,0,&edge) == MagickFalse)
    return(MagickFalse);
  /*
    Pop entries until the queue is empty; i is the current queue length.
  */
  for (i=1; i != 0; )
  {
    ssize_t
      v;
    i--;
    status=GetMatrixElement(canny_cache,i,0,&edge);
    if (status == MagickFalse)
      return(MagickFalse);
    /*
      Examine the 8-connected neighborhood of the dequeued pixel.
    */
    for (v=(-1); v <= 1; v++)
    {
      ssize_t
        u;
      for (u=(-1); u <= 1; u++)
      {
        if ((u == 0) && (v == 0))
          continue;
        if (IsAuthenticPixel(edge_image,edge.x+u,edge.y+v) == MagickFalse)
          continue;
        /*
          Not an edge if gradient value is below the lower threshold.
        */
        q=GetCacheViewAuthenticPixels(edge_view,edge.x+u,edge.y+v,1,1,
          exception);
        if (q == (Quantum *) NULL)
          return(MagickFalse);
        status=GetMatrixElement(canny_cache,edge.x+u,edge.y+v,&pixel);
        if (status == MagickFalse)
          return(MagickFalse);
        /*
          Unmarked neighbor above the lower threshold: mark it and enqueue
          it for further expansion.
        */
        if ((GetPixelIntensity(edge_image,q) == 0.0) &&
            (pixel.intensity >= lower_threshold))
          {
            *q=QuantumRange;
            status=SyncCacheViewAuthenticPixels(edge_view,exception);
            if (status == MagickFalse)
              return(MagickFalse);
            edge.x+=u;
            edge.y+=v;
            status=SetMatrixElement(canny_cache,i,0,&edge);
            if (status == MagickFalse)
              return(MagickFalse);
            i++;
          }
      }
    }
  }
  return(MagickTrue);
}
MagickExport Image *CannyEdgeImage(const Image *image,const double radius,
  const double sigma,const double lower_percent,const double upper_percent,
  ExceptionInfo *exception)
{
#define CannyEdgeImageTag "CannyEdge/Image"
  CacheView
    *edge_view;
  CannyInfo
    element;
  char
    geometry[MagickPathExtent];
  double
    lower_threshold,
    max,
    min,
    upper_threshold;
  Image
    *edge_image;
  KernelInfo
    *kernel_info;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  MatrixInfo
    *canny_cache;
  ssize_t
    y;
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Filter out noise: smooth with a horizontal and a vertical 1-D Gaussian
    blur pass before differentiation.
  */
  (void) FormatLocaleString(geometry,MagickPathExtent,
    "blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma);
  kernel_info=AcquireKernelInfo(geometry,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  edge_image=MorphologyImage(image,ConvolveMorphology,1,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  if (edge_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    The gradient is computed on intensity only: reduce to grayscale and
    disable the alpha channel.
  */
  if (TransformImageColorspace(edge_image,GRAYColorspace,exception) == MagickFalse)
    {
      edge_image=DestroyImage(edge_image);
      return((Image *) NULL);
    }
  (void) SetImageAlphaChannel(edge_image,OffAlphaChannel,exception);
  /*
    Find the intensity gradient of the image: one CannyInfo entry per pixel.
  */
  canny_cache=AcquireMatrixInfo(edge_image->columns,edge_image->rows,
    sizeof(CannyInfo),exception);
  if (canny_cache == (MatrixInfo *) NULL)
    {
      edge_image=DestroyImage(edge_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  edge_view=AcquireVirtualCacheView(edge_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(edge_image,edge_image,edge_image->rows,1)
#endif
  for (y=0; y < (ssize_t) edge_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    /*
      Read a (columns+1) x 2 region so the 2x2 kernels below can reference
      the right and lower neighbors of every pixel in this row.
    */
    p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns+1,2,
      exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) edge_image->columns; x++)
    {
      CannyInfo
        pixel;
      double
        dx,
        dy;
      register const Quantum
        *magick_restrict kernel_pixels;
      ssize_t
        v;
      /*
        2x2 finite-difference kernels: Gx responds to horizontal intensity
        change, Gy to vertical; the 0.5 factor below averages the two rows
        (respectively columns).
      */
      static double
        Gx[2][2] =
        {
          { -1.0, +1.0 },
          { -1.0, +1.0 }
        },
        Gy[2][2] =
        {
          { +1.0, +1.0 },
          { -1.0, -1.0 }
        };
      (void) memset(&pixel,0,sizeof(pixel));
      dx=0.0;
      dy=0.0;
      kernel_pixels=p;
      for (v=0; v < 2; v++)
      {
        ssize_t
          u;
        for (u=0; u < 2; u++)
        {
          double
            intensity;
          intensity=GetPixelIntensity(edge_image,kernel_pixels+u);
          dx+=0.5*Gx[v][u]*intensity;
          dy+=0.5*Gy[v][u]*intensity;
        }
        kernel_pixels+=edge_image->columns+1;
      }
      pixel.magnitude=hypot(dx,dy);
      pixel.orientation=0;
      /*
        Quantize the gradient direction into 4 buckets; the slope thresholds
        are tan(22.5 deg) ~= 0.41421 and tan(67.5 deg) ~= 2.41421.
      */
      if (fabs(dx) > MagickEpsilon)
        {
          double
            slope;
          slope=dy/dx;
          if (slope < 0.0)
            {
              if (slope < -2.41421356237)
                pixel.orientation=0;
              else
                if (slope < -0.414213562373)
                  pixel.orientation=1;
                else
                  pixel.orientation=2;
            }
          else
            {
              if (slope > 2.41421356237)
                pixel.orientation=0;
              else
                if (slope > 0.414213562373)
                  pixel.orientation=3;
                else
                  pixel.orientation=2;
            }
        }
      /*
        NOTE(review): a SetMatrixElement failure skips the pointer advance
        below, misaligning the remainder of the row; it can only fail for
        out-of-range coordinates, which cannot occur here.
      */
      if (SetMatrixElement(canny_cache,x,y,&pixel) == MagickFalse)
        continue;
      p+=GetPixelChannels(edge_image);
    }
  }
  edge_view=DestroyCacheView(edge_view);
  /*
    Non-maxima suppression, remove pixels that are not considered to be part
    of an edge.
  */
  progress=0;
  /*
    Seed min/max from element (0,0); its intensity is still zero here since
    the first pass only sets magnitude and orientation.
  */
  (void) GetMatrixElement(canny_cache,0,0,&element);
  max=element.intensity;
  min=element.intensity;
  edge_view=AcquireAuthenticCacheView(edge_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(edge_image,edge_image,edge_image->rows,1)
#endif
  for (y=0; y < (ssize_t) edge_image->rows; y++)
  {
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(edge_view,0,y,edge_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) edge_image->columns; x++)
    {
      CannyInfo
        alpha_pixel,
        beta_pixel,
        pixel;
      /*
        Compare the pixel with its two neighbors along the quantized
        gradient direction; keep its magnitude only if it is the local
        maximum.
      */
      (void) GetMatrixElement(canny_cache,x,y,&pixel);
      switch (pixel.orientation)
      {
        case 0:
        default:
        {
          /*
            0 degrees, north and south.
          */
          (void) GetMatrixElement(canny_cache,x,y-1,&alpha_pixel);
          (void) GetMatrixElement(canny_cache,x,y+1,&beta_pixel);
          break;
        }
        case 1:
        {
          /*
            45 degrees, northwest and southeast.
          */
          (void) GetMatrixElement(canny_cache,x-1,y-1,&alpha_pixel);
          (void) GetMatrixElement(canny_cache,x+1,y+1,&beta_pixel);
          break;
        }
        case 2:
        {
          /*
            90 degrees, east and west.
          */
          (void) GetMatrixElement(canny_cache,x-1,y,&alpha_pixel);
          (void) GetMatrixElement(canny_cache,x+1,y,&beta_pixel);
          break;
        }
        case 3:
        {
          /*
            135 degrees, northeast and southwest.
          */
          (void) GetMatrixElement(canny_cache,x+1,y-1,&beta_pixel);
          (void) GetMatrixElement(canny_cache,x-1,y+1,&alpha_pixel);
          break;
        }
      }
      pixel.intensity=pixel.magnitude;
      if ((pixel.magnitude < alpha_pixel.magnitude) ||
          (pixel.magnitude < beta_pixel.magnitude))
        pixel.intensity=0;
      (void) SetMatrixElement(canny_cache,x,y,&pixel);
      /*
        Track the global intensity range for the percentile-based hysteresis
        thresholds computed below.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp critical (MagickCore_CannyEdgeImage)
#endif
      {
        if (pixel.intensity < min)
          min=pixel.intensity;
        if (pixel.intensity > max)
          max=pixel.intensity;
      }
      /*
        Clear the output pixel; edges are re-marked during hysteresis.
      */
      *q=0;
      q+=GetPixelChannels(edge_image);
    }
    if (SyncCacheViewAuthenticPixels(edge_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  edge_view=DestroyCacheView(edge_view);
  /*
    Estimate hysteresis threshold.
  */
  lower_threshold=lower_percent*(max-min)+min;
  upper_threshold=upper_percent*(max-min)+min;
  /*
    Hysteresis threshold.
  */
  edge_view=AcquireAuthenticCacheView(edge_image,exception);
  for (y=0; y < (ssize_t) edge_image->rows; y++)
  {
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    for (x=0; x < (ssize_t) edge_image->columns; x++)
    {
      CannyInfo
        pixel;
      register const Quantum
        *magick_restrict p;
      /*
        Edge if pixel gradient higher than upper threshold.
      */
      p=GetCacheViewVirtualPixels(edge_view,x,y,1,1,exception);
      if (p == (const Quantum *) NULL)
        continue;
      status=GetMatrixElement(canny_cache,x,y,&pixel);
      if (status == MagickFalse)
        continue;
      if ((GetPixelIntensity(edge_image,p) == 0.0) &&
          (pixel.intensity >= upper_threshold))
        status=TraceEdges(edge_image,edge_view,canny_cache,x,y,lower_threshold,
          exception);
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
        /*
          NOTE(review): this loop is serial, so the atomic below appears
          unnecessary (though harmless) -- likely a leftover from a parallel
          variant of this pass.
        */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,CannyEdgeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  edge_view=DestroyCacheView(edge_view);
  /*
    Free resources.
  */
  canny_cache=DestroyMatrixInfo(canny_cache);
  return(edge_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e F e a t u r e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageFeatures() returns features for each channel in the image in
% each of four directions (horizontal, vertical, left and right diagonals)
% for the specified distance. The features include the angular second
% moment, contrast, correlation, sum of squares: variance, inverse difference
% moment, sum average, sum variance, sum entropy, entropy, difference variance,
% difference entropy, information measures of correlation 1, information
% measures of correlation 2, and maximum correlation coefficient. You can
% access the red channel contrast, for example, like this:
%
% channel_features=GetImageFeatures(image,1,exception);
% contrast=channel_features[RedPixelChannel].contrast[0];
%
% Use MagickRelinquishMemory() to free the features buffer.
%
% The format of the GetImageFeatures method is:
%
% ChannelFeatures *GetImageFeatures(const Image *image,
% const size_t distance,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o distance: the distance.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickLog10(const double x)
{
#define Log10Epsilon (1.0e-11)
if (fabs(x) < Log10Epsilon)
return(log10(Log10Epsilon));
return(log10(fabs(x)));
}
MagickExport ChannelFeatures *GetImageFeatures(const Image *image,
const size_t distance,ExceptionInfo *exception)
{
typedef struct _ChannelStatistics
{
PixelInfo
direction[4]; /* horizontal, vertical, left and right diagonals */
} ChannelStatistics;
CacheView
*image_view;
ChannelFeatures
*channel_features;
ChannelStatistics
**cooccurrence,
correlation,
*density_x,
*density_xy,
*density_y,
entropy_x,
entropy_xy,
entropy_xy1,
entropy_xy2,
entropy_y,
mean,
**Q,
*sum,
sum_squares,
variance;
PixelPacket
gray,
*grays;
MagickBooleanType
status;
register ssize_t
i,
r;
size_t
length;
unsigned int
number_grays;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if ((image->columns < (distance+1)) || (image->rows < (distance+1)))
return((ChannelFeatures *) NULL);
length=MaxPixelChannels+1UL;
channel_features=(ChannelFeatures *) AcquireQuantumMemory(length,
sizeof(*channel_features));
if (channel_features == (ChannelFeatures *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) memset(channel_features,0,length*
sizeof(*channel_features));
/*
Form grays.
*/
grays=(PixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*grays));
if (grays == (PixelPacket *) NULL)
{
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(channel_features);
}
for (i=0; i <= (ssize_t) MaxMap; i++)
{
grays[i].red=(~0U);
grays[i].green=(~0U);
grays[i].blue=(~0U);
grays[i].alpha=(~0U);
grays[i].black=(~0U);
}
status=MagickTrue;
image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (r=0; r < (ssize_t) image->rows; r++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,r,image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
grays[ScaleQuantumToMap(GetPixelRed(image,p))].red=
ScaleQuantumToMap(GetPixelRed(image,p));
grays[ScaleQuantumToMap(GetPixelGreen(image,p))].green=
ScaleQuantumToMap(GetPixelGreen(image,p));
grays[ScaleQuantumToMap(GetPixelBlue(image,p))].blue=
ScaleQuantumToMap(GetPixelBlue(image,p));
if (image->colorspace == CMYKColorspace)
grays[ScaleQuantumToMap(GetPixelBlack(image,p))].black=
ScaleQuantumToMap(GetPixelBlack(image,p));
if (image->alpha_trait != UndefinedPixelTrait)
grays[ScaleQuantumToMap(GetPixelAlpha(image,p))].alpha=
ScaleQuantumToMap(GetPixelAlpha(image,p));
p+=GetPixelChannels(image);
}
}
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
{
grays=(PixelPacket *) RelinquishMagickMemory(grays);
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
return(channel_features);
}
(void) memset(&gray,0,sizeof(gray));
for (i=0; i <= (ssize_t) MaxMap; i++)
{
if (grays[i].red != ~0U)
grays[gray.red++].red=grays[i].red;
if (grays[i].green != ~0U)
grays[gray.green++].green=grays[i].green;
if (grays[i].blue != ~0U)
grays[gray.blue++].blue=grays[i].blue;
if (image->colorspace == CMYKColorspace)
if (grays[i].black != ~0U)
grays[gray.black++].black=grays[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
if (grays[i].alpha != ~0U)
grays[gray.alpha++].alpha=grays[i].alpha;
}
/*
Allocate spatial dependence matrix.
*/
number_grays=gray.red;
if (gray.green > number_grays)
number_grays=gray.green;
if (gray.blue > number_grays)
number_grays=gray.blue;
if (image->colorspace == CMYKColorspace)
if (gray.black > number_grays)
number_grays=gray.black;
if (image->alpha_trait != UndefinedPixelTrait)
if (gray.alpha > number_grays)
number_grays=gray.alpha;
cooccurrence=(ChannelStatistics **) AcquireQuantumMemory(number_grays,
sizeof(*cooccurrence));
density_x=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1),
sizeof(*density_x));
density_xy=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1),
sizeof(*density_xy));
density_y=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1),
sizeof(*density_y));
Q=(ChannelStatistics **) AcquireQuantumMemory(number_grays,sizeof(*Q));
sum=(ChannelStatistics *) AcquireQuantumMemory(number_grays,sizeof(*sum));
if ((cooccurrence == (ChannelStatistics **) NULL) ||
(density_x == (ChannelStatistics *) NULL) ||
(density_xy == (ChannelStatistics *) NULL) ||
(density_y == (ChannelStatistics *) NULL) ||
(Q == (ChannelStatistics **) NULL) ||
(sum == (ChannelStatistics *) NULL))
{
if (Q != (ChannelStatistics **) NULL)
{
for (i=0; i < (ssize_t) number_grays; i++)
Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]);
Q=(ChannelStatistics **) RelinquishMagickMemory(Q);
}
if (sum != (ChannelStatistics *) NULL)
sum=(ChannelStatistics *) RelinquishMagickMemory(sum);
if (density_y != (ChannelStatistics *) NULL)
density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y);
if (density_xy != (ChannelStatistics *) NULL)
density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy);
if (density_x != (ChannelStatistics *) NULL)
density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x);
if (cooccurrence != (ChannelStatistics **) NULL)
{
for (i=0; i < (ssize_t) number_grays; i++)
cooccurrence[i]=(ChannelStatistics *)
RelinquishMagickMemory(cooccurrence[i]);
cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(
cooccurrence);
}
grays=(PixelPacket *) RelinquishMagickMemory(grays);
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(channel_features);
}
(void) memset(&correlation,0,sizeof(correlation));
(void) memset(density_x,0,2*(number_grays+1)*sizeof(*density_x));
(void) memset(density_xy,0,2*(number_grays+1)*sizeof(*density_xy));
(void) memset(density_y,0,2*(number_grays+1)*sizeof(*density_y));
(void) memset(&mean,0,sizeof(mean));
(void) memset(sum,0,number_grays*sizeof(*sum));
(void) memset(&sum_squares,0,sizeof(sum_squares));
(void) memset(density_xy,0,2*number_grays*sizeof(*density_xy));
(void) memset(&entropy_x,0,sizeof(entropy_x));
(void) memset(&entropy_xy,0,sizeof(entropy_xy));
(void) memset(&entropy_xy1,0,sizeof(entropy_xy1));
(void) memset(&entropy_xy2,0,sizeof(entropy_xy2));
(void) memset(&entropy_y,0,sizeof(entropy_y));
(void) memset(&variance,0,sizeof(variance));
for (i=0; i < (ssize_t) number_grays; i++)
{
cooccurrence[i]=(ChannelStatistics *) AcquireQuantumMemory(number_grays,
sizeof(**cooccurrence));
Q[i]=(ChannelStatistics *) AcquireQuantumMemory(number_grays,sizeof(**Q));
if ((cooccurrence[i] == (ChannelStatistics *) NULL) ||
(Q[i] == (ChannelStatistics *) NULL))
break;
(void) memset(cooccurrence[i],0,number_grays*
sizeof(**cooccurrence));
(void) memset(Q[i],0,number_grays*sizeof(**Q));
}
if (i < (ssize_t) number_grays)
{
for (i--; i >= 0; i--)
{
if (Q[i] != (ChannelStatistics *) NULL)
Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]);
if (cooccurrence[i] != (ChannelStatistics *) NULL)
cooccurrence[i]=(ChannelStatistics *)
RelinquishMagickMemory(cooccurrence[i]);
}
Q=(ChannelStatistics **) RelinquishMagickMemory(Q);
cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence);
sum=(ChannelStatistics *) RelinquishMagickMemory(sum);
density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y);
density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy);
density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x);
grays=(PixelPacket *) RelinquishMagickMemory(grays);
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(channel_features);
}
/*
Initialize spatial dependence matrix.
*/
status=MagickTrue;
image_view=AcquireVirtualCacheView(image,exception);
for (r=0; r < (ssize_t) image->rows; r++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
ssize_t
offset,
u,
v;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,-(ssize_t) distance,r,image->columns+
2*distance,distance+2,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
p+=distance*GetPixelChannels(image);;
for (x=0; x < (ssize_t) image->columns; x++)
{
for (i=0; i < 4; i++)
{
switch (i)
{
case 0:
default:
{
/*
Horizontal adjacency.
*/
offset=(ssize_t) distance;
break;
}
case 1:
{
/*
Vertical adjacency.
*/
offset=(ssize_t) (image->columns+2*distance);
break;
}
case 2:
{
/*
Right diagonal adjacency.
*/
offset=(ssize_t) ((image->columns+2*distance)-distance);
break;
}
case 3:
{
/*
Left diagonal adjacency.
*/
offset=(ssize_t) ((image->columns+2*distance)+distance);
break;
}
}
u=0;
v=0;
while (grays[u].red != ScaleQuantumToMap(GetPixelRed(image,p)))
u++;
while (grays[v].red != ScaleQuantumToMap(GetPixelRed(image,p+offset*GetPixelChannels(image))))
v++;
cooccurrence[u][v].direction[i].red++;
cooccurrence[v][u].direction[i].red++;
u=0;
v=0;
while (grays[u].green != ScaleQuantumToMap(GetPixelGreen(image,p)))
u++;
while (grays[v].green != ScaleQuantumToMap(GetPixelGreen(image,p+offset*GetPixelChannels(image))))
v++;
cooccurrence[u][v].direction[i].green++;
cooccurrence[v][u].direction[i].green++;
u=0;
v=0;
while (grays[u].blue != ScaleQuantumToMap(GetPixelBlue(image,p)))
u++;
while (grays[v].blue != ScaleQuantumToMap(GetPixelBlue(image,p+offset*GetPixelChannels(image))))
v++;
cooccurrence[u][v].direction[i].blue++;
cooccurrence[v][u].direction[i].blue++;
if (image->colorspace == CMYKColorspace)
{
u=0;
v=0;
while (grays[u].black != ScaleQuantumToMap(GetPixelBlack(image,p)))
u++;
while (grays[v].black != ScaleQuantumToMap(GetPixelBlack(image,p+offset*GetPixelChannels(image))))
v++;
cooccurrence[u][v].direction[i].black++;
cooccurrence[v][u].direction[i].black++;
}
if (image->alpha_trait != UndefinedPixelTrait)
{
u=0;
v=0;
while (grays[u].alpha != ScaleQuantumToMap(GetPixelAlpha(image,p)))
u++;
while (grays[v].alpha != ScaleQuantumToMap(GetPixelAlpha(image,p+offset*GetPixelChannels(image))))
v++;
cooccurrence[u][v].direction[i].alpha++;
cooccurrence[v][u].direction[i].alpha++;
}
}
p+=GetPixelChannels(image);
}
}
grays=(PixelPacket *) RelinquishMagickMemory(grays);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
{
for (i=0; i < (ssize_t) number_grays; i++)
cooccurrence[i]=(ChannelStatistics *)
RelinquishMagickMemory(cooccurrence[i]);
cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence);
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(channel_features);
}
/*
Normalize spatial dependence matrix.
*/
for (i=0; i < 4; i++)
{
double
normalize;
register ssize_t
y;
switch (i)
{
case 0:
default:
{
/*
Horizontal adjacency.
*/
normalize=2.0*image->rows*(image->columns-distance);
break;
}
case 1:
{
/*
Vertical adjacency.
*/
normalize=2.0*(image->rows-distance)*image->columns;
break;
}
case 2:
{
/*
Right diagonal adjacency.
*/
normalize=2.0*(image->rows-distance)*(image->columns-distance);
break;
}
case 3:
{
/*
Left diagonal adjacency.
*/
normalize=2.0*(image->rows-distance)*(image->columns-distance);
break;
}
}
normalize=PerceptibleReciprocal(normalize);
for (y=0; y < (ssize_t) number_grays; y++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
cooccurrence[x][y].direction[i].red*=normalize;
cooccurrence[x][y].direction[i].green*=normalize;
cooccurrence[x][y].direction[i].blue*=normalize;
if (image->colorspace == CMYKColorspace)
cooccurrence[x][y].direction[i].black*=normalize;
if (image->alpha_trait != UndefinedPixelTrait)
cooccurrence[x][y].direction[i].alpha*=normalize;
}
}
}
/*
Compute texture features.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,number_grays,1)
#endif
for (i=0; i < 4; i++)
{
register ssize_t
y;
for (y=0; y < (ssize_t) number_grays; y++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
/*
Angular second moment: measure of homogeneity of the image.
*/
channel_features[RedPixelChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].red*
cooccurrence[x][y].direction[i].red;
channel_features[GreenPixelChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].green*
cooccurrence[x][y].direction[i].green;
channel_features[BluePixelChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].blue*
cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].black*
cooccurrence[x][y].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].alpha*
cooccurrence[x][y].direction[i].alpha;
/*
Correlation: measure of linear-dependencies in the image.
*/
sum[y].direction[i].red+=cooccurrence[x][y].direction[i].red;
sum[y].direction[i].green+=cooccurrence[x][y].direction[i].green;
sum[y].direction[i].blue+=cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
sum[y].direction[i].black+=cooccurrence[x][y].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
sum[y].direction[i].alpha+=cooccurrence[x][y].direction[i].alpha;
correlation.direction[i].red+=x*y*cooccurrence[x][y].direction[i].red;
correlation.direction[i].green+=x*y*
cooccurrence[x][y].direction[i].green;
correlation.direction[i].blue+=x*y*
cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
correlation.direction[i].black+=x*y*
cooccurrence[x][y].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
correlation.direction[i].alpha+=x*y*
cooccurrence[x][y].direction[i].alpha;
/*
Inverse Difference Moment.
*/
channel_features[RedPixelChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].red/((y-x)*(y-x)+1);
channel_features[GreenPixelChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].green/((y-x)*(y-x)+1);
channel_features[BluePixelChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].blue/((y-x)*(y-x)+1);
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].black/((y-x)*(y-x)+1);
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].alpha/((y-x)*(y-x)+1);
/*
Sum average.
*/
density_xy[y+x+2].direction[i].red+=
cooccurrence[x][y].direction[i].red;
density_xy[y+x+2].direction[i].green+=
cooccurrence[x][y].direction[i].green;
density_xy[y+x+2].direction[i].blue+=
cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
density_xy[y+x+2].direction[i].black+=
cooccurrence[x][y].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
density_xy[y+x+2].direction[i].alpha+=
cooccurrence[x][y].direction[i].alpha;
/*
Entropy.
*/
channel_features[RedPixelChannel].entropy[i]-=
cooccurrence[x][y].direction[i].red*
MagickLog10(cooccurrence[x][y].direction[i].red);
channel_features[GreenPixelChannel].entropy[i]-=
cooccurrence[x][y].direction[i].green*
MagickLog10(cooccurrence[x][y].direction[i].green);
channel_features[BluePixelChannel].entropy[i]-=
cooccurrence[x][y].direction[i].blue*
MagickLog10(cooccurrence[x][y].direction[i].blue);
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].entropy[i]-=
cooccurrence[x][y].direction[i].black*
MagickLog10(cooccurrence[x][y].direction[i].black);
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].entropy[i]-=
cooccurrence[x][y].direction[i].alpha*
MagickLog10(cooccurrence[x][y].direction[i].alpha);
/*
Information Measures of Correlation.
*/
density_x[x].direction[i].red+=cooccurrence[x][y].direction[i].red;
density_x[x].direction[i].green+=cooccurrence[x][y].direction[i].green;
density_x[x].direction[i].blue+=cooccurrence[x][y].direction[i].blue;
if (image->alpha_trait != UndefinedPixelTrait)
density_x[x].direction[i].alpha+=
cooccurrence[x][y].direction[i].alpha;
if (image->colorspace == CMYKColorspace)
density_x[x].direction[i].black+=
cooccurrence[x][y].direction[i].black;
density_y[y].direction[i].red+=cooccurrence[x][y].direction[i].red;
density_y[y].direction[i].green+=cooccurrence[x][y].direction[i].green;
density_y[y].direction[i].blue+=cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
density_y[y].direction[i].black+=
cooccurrence[x][y].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
density_y[y].direction[i].alpha+=
cooccurrence[x][y].direction[i].alpha;
}
mean.direction[i].red+=y*sum[y].direction[i].red;
sum_squares.direction[i].red+=y*y*sum[y].direction[i].red;
mean.direction[i].green+=y*sum[y].direction[i].green;
sum_squares.direction[i].green+=y*y*sum[y].direction[i].green;
mean.direction[i].blue+=y*sum[y].direction[i].blue;
sum_squares.direction[i].blue+=y*y*sum[y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
{
mean.direction[i].black+=y*sum[y].direction[i].black;
sum_squares.direction[i].black+=y*y*sum[y].direction[i].black;
}
if (image->alpha_trait != UndefinedPixelTrait)
{
mean.direction[i].alpha+=y*sum[y].direction[i].alpha;
sum_squares.direction[i].alpha+=y*y*sum[y].direction[i].alpha;
}
}
/*
Correlation: measure of linear-dependencies in the image.
*/
channel_features[RedPixelChannel].correlation[i]=
(correlation.direction[i].red-mean.direction[i].red*
mean.direction[i].red)/(sqrt(sum_squares.direction[i].red-
(mean.direction[i].red*mean.direction[i].red))*sqrt(
sum_squares.direction[i].red-(mean.direction[i].red*
mean.direction[i].red)));
channel_features[GreenPixelChannel].correlation[i]=
(correlation.direction[i].green-mean.direction[i].green*
mean.direction[i].green)/(sqrt(sum_squares.direction[i].green-
(mean.direction[i].green*mean.direction[i].green))*sqrt(
sum_squares.direction[i].green-(mean.direction[i].green*
mean.direction[i].green)));
channel_features[BluePixelChannel].correlation[i]=
(correlation.direction[i].blue-mean.direction[i].blue*
mean.direction[i].blue)/(sqrt(sum_squares.direction[i].blue-
(mean.direction[i].blue*mean.direction[i].blue))*sqrt(
sum_squares.direction[i].blue-(mean.direction[i].blue*
mean.direction[i].blue)));
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].correlation[i]=
(correlation.direction[i].black-mean.direction[i].black*
mean.direction[i].black)/(sqrt(sum_squares.direction[i].black-
(mean.direction[i].black*mean.direction[i].black))*sqrt(
sum_squares.direction[i].black-(mean.direction[i].black*
mean.direction[i].black)));
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].correlation[i]=
(correlation.direction[i].alpha-mean.direction[i].alpha*
mean.direction[i].alpha)/(sqrt(sum_squares.direction[i].alpha-
(mean.direction[i].alpha*mean.direction[i].alpha))*sqrt(
sum_squares.direction[i].alpha-(mean.direction[i].alpha*
mean.direction[i].alpha)));
}
/*
Compute more texture features.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,number_grays,1)
#endif
for (i=0; i < 4; i++)
{
register ssize_t
x;
for (x=2; x < (ssize_t) (2*number_grays); x++)
{
/*
Sum average.
*/
channel_features[RedPixelChannel].sum_average[i]+=
x*density_xy[x].direction[i].red;
channel_features[GreenPixelChannel].sum_average[i]+=
x*density_xy[x].direction[i].green;
channel_features[BluePixelChannel].sum_average[i]+=
x*density_xy[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].sum_average[i]+=
x*density_xy[x].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].sum_average[i]+=
x*density_xy[x].direction[i].alpha;
/*
Sum entropy.
*/
channel_features[RedPixelChannel].sum_entropy[i]-=
density_xy[x].direction[i].red*
MagickLog10(density_xy[x].direction[i].red);
channel_features[GreenPixelChannel].sum_entropy[i]-=
density_xy[x].direction[i].green*
MagickLog10(density_xy[x].direction[i].green);
channel_features[BluePixelChannel].sum_entropy[i]-=
density_xy[x].direction[i].blue*
MagickLog10(density_xy[x].direction[i].blue);
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].sum_entropy[i]-=
density_xy[x].direction[i].black*
MagickLog10(density_xy[x].direction[i].black);
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].sum_entropy[i]-=
density_xy[x].direction[i].alpha*
MagickLog10(density_xy[x].direction[i].alpha);
/*
Sum variance.
*/
channel_features[RedPixelChannel].sum_variance[i]+=
(x-channel_features[RedPixelChannel].sum_entropy[i])*
(x-channel_features[RedPixelChannel].sum_entropy[i])*
density_xy[x].direction[i].red;
channel_features[GreenPixelChannel].sum_variance[i]+=
(x-channel_features[GreenPixelChannel].sum_entropy[i])*
(x-channel_features[GreenPixelChannel].sum_entropy[i])*
density_xy[x].direction[i].green;
channel_features[BluePixelChannel].sum_variance[i]+=
(x-channel_features[BluePixelChannel].sum_entropy[i])*
(x-channel_features[BluePixelChannel].sum_entropy[i])*
density_xy[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].sum_variance[i]+=
(x-channel_features[BlackPixelChannel].sum_entropy[i])*
(x-channel_features[BlackPixelChannel].sum_entropy[i])*
density_xy[x].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].sum_variance[i]+=
(x-channel_features[AlphaPixelChannel].sum_entropy[i])*
(x-channel_features[AlphaPixelChannel].sum_entropy[i])*
density_xy[x].direction[i].alpha;
}
}
/*
Compute more texture features.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,number_grays,1)
#endif
for (i=0; i < 4; i++)
{
register ssize_t
y;
for (y=0; y < (ssize_t) number_grays; y++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
/*
Sum of Squares: Variance
*/
variance.direction[i].red+=(y-mean.direction[i].red+1)*
(y-mean.direction[i].red+1)*cooccurrence[x][y].direction[i].red;
variance.direction[i].green+=(y-mean.direction[i].green+1)*
(y-mean.direction[i].green+1)*cooccurrence[x][y].direction[i].green;
variance.direction[i].blue+=(y-mean.direction[i].blue+1)*
(y-mean.direction[i].blue+1)*cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
variance.direction[i].black+=(y-mean.direction[i].black+1)*
(y-mean.direction[i].black+1)*cooccurrence[x][y].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
variance.direction[i].alpha+=(y-mean.direction[i].alpha+1)*
(y-mean.direction[i].alpha+1)*
cooccurrence[x][y].direction[i].alpha;
/*
Sum average / Difference Variance.
*/
density_xy[MagickAbsoluteValue(y-x)].direction[i].red+=
cooccurrence[x][y].direction[i].red;
density_xy[MagickAbsoluteValue(y-x)].direction[i].green+=
cooccurrence[x][y].direction[i].green;
density_xy[MagickAbsoluteValue(y-x)].direction[i].blue+=
cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
density_xy[MagickAbsoluteValue(y-x)].direction[i].black+=
cooccurrence[x][y].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
density_xy[MagickAbsoluteValue(y-x)].direction[i].alpha+=
cooccurrence[x][y].direction[i].alpha;
/*
Information Measures of Correlation.
*/
entropy_xy.direction[i].red-=cooccurrence[x][y].direction[i].red*
MagickLog10(cooccurrence[x][y].direction[i].red);
entropy_xy.direction[i].green-=cooccurrence[x][y].direction[i].green*
MagickLog10(cooccurrence[x][y].direction[i].green);
entropy_xy.direction[i].blue-=cooccurrence[x][y].direction[i].blue*
MagickLog10(cooccurrence[x][y].direction[i].blue);
if (image->colorspace == CMYKColorspace)
entropy_xy.direction[i].black-=cooccurrence[x][y].direction[i].black*
MagickLog10(cooccurrence[x][y].direction[i].black);
if (image->alpha_trait != UndefinedPixelTrait)
entropy_xy.direction[i].alpha-=
cooccurrence[x][y].direction[i].alpha*MagickLog10(
cooccurrence[x][y].direction[i].alpha);
entropy_xy1.direction[i].red-=(cooccurrence[x][y].direction[i].red*
MagickLog10(density_x[x].direction[i].red*density_y[y].direction[i].red));
entropy_xy1.direction[i].green-=(cooccurrence[x][y].direction[i].green*
MagickLog10(density_x[x].direction[i].green*
density_y[y].direction[i].green));
entropy_xy1.direction[i].blue-=(cooccurrence[x][y].direction[i].blue*
MagickLog10(density_x[x].direction[i].blue*density_y[y].direction[i].blue));
if (image->colorspace == CMYKColorspace)
entropy_xy1.direction[i].black-=(
cooccurrence[x][y].direction[i].black*MagickLog10(
density_x[x].direction[i].black*density_y[y].direction[i].black));
if (image->alpha_trait != UndefinedPixelTrait)
entropy_xy1.direction[i].alpha-=(
cooccurrence[x][y].direction[i].alpha*MagickLog10(
density_x[x].direction[i].alpha*density_y[y].direction[i].alpha));
entropy_xy2.direction[i].red-=(density_x[x].direction[i].red*
density_y[y].direction[i].red*MagickLog10(density_x[x].direction[i].red*
density_y[y].direction[i].red));
entropy_xy2.direction[i].green-=(density_x[x].direction[i].green*
density_y[y].direction[i].green*MagickLog10(density_x[x].direction[i].green*
density_y[y].direction[i].green));
entropy_xy2.direction[i].blue-=(density_x[x].direction[i].blue*
density_y[y].direction[i].blue*MagickLog10(density_x[x].direction[i].blue*
density_y[y].direction[i].blue));
if (image->colorspace == CMYKColorspace)
entropy_xy2.direction[i].black-=(density_x[x].direction[i].black*
density_y[y].direction[i].black*MagickLog10(
density_x[x].direction[i].black*density_y[y].direction[i].black));
if (image->alpha_trait != UndefinedPixelTrait)
entropy_xy2.direction[i].alpha-=(density_x[x].direction[i].alpha*
density_y[y].direction[i].alpha*MagickLog10(
density_x[x].direction[i].alpha*density_y[y].direction[i].alpha));
}
}
channel_features[RedPixelChannel].variance_sum_of_squares[i]=
variance.direction[i].red;
channel_features[GreenPixelChannel].variance_sum_of_squares[i]=
variance.direction[i].green;
channel_features[BluePixelChannel].variance_sum_of_squares[i]=
variance.direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].variance_sum_of_squares[i]=
variance.direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].variance_sum_of_squares[i]=
variance.direction[i].alpha;
}
/*
Compute more texture features.
*/
(void) memset(&variance,0,sizeof(variance));
(void) memset(&sum_squares,0,sizeof(sum_squares));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,number_grays,1)
#endif
for (i=0; i < 4; i++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
/*
Difference variance.
*/
variance.direction[i].red+=density_xy[x].direction[i].red;
variance.direction[i].green+=density_xy[x].direction[i].green;
variance.direction[i].blue+=density_xy[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
variance.direction[i].black+=density_xy[x].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
variance.direction[i].alpha+=density_xy[x].direction[i].alpha;
sum_squares.direction[i].red+=density_xy[x].direction[i].red*
density_xy[x].direction[i].red;
sum_squares.direction[i].green+=density_xy[x].direction[i].green*
density_xy[x].direction[i].green;
sum_squares.direction[i].blue+=density_xy[x].direction[i].blue*
density_xy[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
sum_squares.direction[i].black+=density_xy[x].direction[i].black*
density_xy[x].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
sum_squares.direction[i].alpha+=density_xy[x].direction[i].alpha*
density_xy[x].direction[i].alpha;
/*
Difference entropy.
*/
channel_features[RedPixelChannel].difference_entropy[i]-=
density_xy[x].direction[i].red*
MagickLog10(density_xy[x].direction[i].red);
channel_features[GreenPixelChannel].difference_entropy[i]-=
density_xy[x].direction[i].green*
MagickLog10(density_xy[x].direction[i].green);
channel_features[BluePixelChannel].difference_entropy[i]-=
density_xy[x].direction[i].blue*
MagickLog10(density_xy[x].direction[i].blue);
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].difference_entropy[i]-=
density_xy[x].direction[i].black*
MagickLog10(density_xy[x].direction[i].black);
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].difference_entropy[i]-=
density_xy[x].direction[i].alpha*
MagickLog10(density_xy[x].direction[i].alpha);
/*
Information Measures of Correlation.
*/
entropy_x.direction[i].red-=(density_x[x].direction[i].red*
MagickLog10(density_x[x].direction[i].red));
entropy_x.direction[i].green-=(density_x[x].direction[i].green*
MagickLog10(density_x[x].direction[i].green));
entropy_x.direction[i].blue-=(density_x[x].direction[i].blue*
MagickLog10(density_x[x].direction[i].blue));
if (image->colorspace == CMYKColorspace)
entropy_x.direction[i].black-=(density_x[x].direction[i].black*
MagickLog10(density_x[x].direction[i].black));
if (image->alpha_trait != UndefinedPixelTrait)
entropy_x.direction[i].alpha-=(density_x[x].direction[i].alpha*
MagickLog10(density_x[x].direction[i].alpha));
entropy_y.direction[i].red-=(density_y[x].direction[i].red*
MagickLog10(density_y[x].direction[i].red));
entropy_y.direction[i].green-=(density_y[x].direction[i].green*
MagickLog10(density_y[x].direction[i].green));
entropy_y.direction[i].blue-=(density_y[x].direction[i].blue*
MagickLog10(density_y[x].direction[i].blue));
if (image->colorspace == CMYKColorspace)
entropy_y.direction[i].black-=(density_y[x].direction[i].black*
MagickLog10(density_y[x].direction[i].black));
if (image->alpha_trait != UndefinedPixelTrait)
entropy_y.direction[i].alpha-=(density_y[x].direction[i].alpha*
MagickLog10(density_y[x].direction[i].alpha));
}
/*
Difference variance.
*/
channel_features[RedPixelChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].red)-
(variance.direction[i].red*variance.direction[i].red))/
((double) number_grays*number_grays*number_grays*number_grays);
channel_features[GreenPixelChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].green)-
(variance.direction[i].green*variance.direction[i].green))/
((double) number_grays*number_grays*number_grays*number_grays);
channel_features[BluePixelChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].blue)-
(variance.direction[i].blue*variance.direction[i].blue))/
((double) number_grays*number_grays*number_grays*number_grays);
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].black)-
(variance.direction[i].black*variance.direction[i].black))/
((double) number_grays*number_grays*number_grays*number_grays);
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].alpha)-
(variance.direction[i].alpha*variance.direction[i].alpha))/
((double) number_grays*number_grays*number_grays*number_grays);
/*
Information Measures of Correlation.
*/
channel_features[RedPixelChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].red-entropy_xy1.direction[i].red)/
(entropy_x.direction[i].red > entropy_y.direction[i].red ?
entropy_x.direction[i].red : entropy_y.direction[i].red);
channel_features[GreenPixelChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].green-entropy_xy1.direction[i].green)/
(entropy_x.direction[i].green > entropy_y.direction[i].green ?
entropy_x.direction[i].green : entropy_y.direction[i].green);
channel_features[BluePixelChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].blue-entropy_xy1.direction[i].blue)/
(entropy_x.direction[i].blue > entropy_y.direction[i].blue ?
entropy_x.direction[i].blue : entropy_y.direction[i].blue);
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].black-entropy_xy1.direction[i].black)/
(entropy_x.direction[i].black > entropy_y.direction[i].black ?
entropy_x.direction[i].black : entropy_y.direction[i].black);
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].alpha-entropy_xy1.direction[i].alpha)/
(entropy_x.direction[i].alpha > entropy_y.direction[i].alpha ?
entropy_x.direction[i].alpha : entropy_y.direction[i].alpha);
channel_features[RedPixelChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].red-
entropy_xy.direction[i].red)))));
channel_features[GreenPixelChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].green-
entropy_xy.direction[i].green)))));
channel_features[BluePixelChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].blue-
entropy_xy.direction[i].blue)))));
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].black-
entropy_xy.direction[i].black)))));
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].alpha-
entropy_xy.direction[i].alpha)))));
}
/*
Compute more texture features.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,number_grays,1)
#endif
for (i=0; i < 4; i++)
{
ssize_t
z;
for (z=0; z < (ssize_t) number_grays; z++)
{
register ssize_t
y;
ChannelStatistics
pixel;
(void) memset(&pixel,0,sizeof(pixel));
for (y=0; y < (ssize_t) number_grays; y++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
/*
Contrast: amount of local variations present in an image.
*/
if (((y-x) == z) || ((x-y) == z))
{
pixel.direction[i].red+=cooccurrence[x][y].direction[i].red;
pixel.direction[i].green+=cooccurrence[x][y].direction[i].green;
pixel.direction[i].blue+=cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
pixel.direction[i].black+=cooccurrence[x][y].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
pixel.direction[i].alpha+=
cooccurrence[x][y].direction[i].alpha;
}
/*
Maximum Correlation Coefficient.
*/
if ((fabs(density_x[z].direction[i].red) > MagickEpsilon) &&
(fabs(density_y[x].direction[i].red) > MagickEpsilon))
Q[z][y].direction[i].red+=cooccurrence[z][x].direction[i].red*
cooccurrence[y][x].direction[i].red/density_x[z].direction[i].red/
density_y[x].direction[i].red;
if ((fabs(density_x[z].direction[i].green) > MagickEpsilon) &&
(fabs(density_y[x].direction[i].red) > MagickEpsilon))
Q[z][y].direction[i].green+=cooccurrence[z][x].direction[i].green*
cooccurrence[y][x].direction[i].green/
density_x[z].direction[i].green/density_y[x].direction[i].red;
if ((fabs(density_x[z].direction[i].blue) > MagickEpsilon) &&
(fabs(density_y[x].direction[i].blue) > MagickEpsilon))
Q[z][y].direction[i].blue+=cooccurrence[z][x].direction[i].blue*
cooccurrence[y][x].direction[i].blue/
density_x[z].direction[i].blue/density_y[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
if ((fabs(density_x[z].direction[i].black) > MagickEpsilon) &&
(fabs(density_y[x].direction[i].black) > MagickEpsilon))
Q[z][y].direction[i].black+=cooccurrence[z][x].direction[i].black*
cooccurrence[y][x].direction[i].black/
density_x[z].direction[i].black/density_y[x].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
if ((fabs(density_x[z].direction[i].alpha) > MagickEpsilon) &&
(fabs(density_y[x].direction[i].alpha) > MagickEpsilon))
Q[z][y].direction[i].alpha+=
cooccurrence[z][x].direction[i].alpha*
cooccurrence[y][x].direction[i].alpha/
density_x[z].direction[i].alpha/
density_y[x].direction[i].alpha;
}
}
channel_features[RedPixelChannel].contrast[i]+=z*z*
pixel.direction[i].red;
channel_features[GreenPixelChannel].contrast[i]+=z*z*
pixel.direction[i].green;
channel_features[BluePixelChannel].contrast[i]+=z*z*
pixel.direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].contrast[i]+=z*z*
pixel.direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].contrast[i]+=z*z*
pixel.direction[i].alpha;
}
/*
Maximum Correlation Coefficient.
Future: return second largest eigenvalue of Q.
*/
channel_features[RedPixelChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
channel_features[GreenPixelChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
channel_features[BluePixelChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
}
/*
Relinquish resources.
*/
sum=(ChannelStatistics *) RelinquishMagickMemory(sum);
for (i=0; i < (ssize_t) number_grays; i++)
Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]);
Q=(ChannelStatistics **) RelinquishMagickMemory(Q);
density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y);
density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy);
density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x);
for (i=0; i < (ssize_t) number_grays; i++)
cooccurrence[i]=(ChannelStatistics *)
RelinquishMagickMemory(cooccurrence[i]);
cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence);
return(channel_features);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% H o u g h L i n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Use HoughLineImage() in conjunction with any binary edge extracted image (we
% recommend Canny) to identify lines in the image. The algorithm accumulates
% counts for every white pixel for every possible orientation (for angles from
% 0 to 179 in 1 degree increments) and distance from the center of the image to
% the corner (in 1 px increments) and stores the counts in an accumulator
% matrix of angle vs distance. The size of the accumulator is 180x(diagonal/2).
% Next it searches this space for peaks in counts and converts the locations
% of the peaks to slope and intercept in the normal x,y input image space. Use
% the slope/intercepts to find the endpoints clipped to the bounds of the
% image. The lines are then drawn. The counts are a measure of the length of
% the lines.
%
% The format of the HoughLineImage method is:
%
% Image *HoughLineImage(const Image *image,const size_t width,
% const size_t height,const size_t threshold,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width, height: find line pairs as local maxima in this neighborhood.
%
% o threshold: the line count threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickRound(double x)
{
  double
    lower,
    upper;

  /*
    Round to the nearest integer; exact half-way fractions round up toward
    ceil() (e.g. 0.5 -> 1.0, -0.5 -> 0.0).
  */
  lower=floor(x);
  upper=ceil(x);
  if ((x-lower) < (upper-x))
    return(lower);
  return(upper);
}
static Image *RenderHoughLines(const ImageInfo *image_info,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define BoundingBox "viewbox"

  DrawInfo
    *draw_info;

  Image
    *image;

  MagickBooleanType
    status;

  /*
    Open a canvas image of columns x rows (scaled by the image resolution)
    and render the MVG line primitives read from the blob onto it.  Returns
    the rendered image or NULL on failure.
  */
  image=AcquireImage(image_info,exception);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  image->columns=columns;
  image->rows=rows;
  draw_info=CloneDrawInfo(image_info,(DrawInfo *) NULL);
  draw_info->affine.sx=image->resolution.x == 0.0 ? 1.0 : image->resolution.x/
    DefaultResolution;
  draw_info->affine.sy=image->resolution.y == 0.0 ? 1.0 : image->resolution.y/
    DefaultResolution;
  image->columns=(size_t) (draw_info->affine.sx*image->columns);
  image->rows=(size_t) (draw_info->affine.sy*image->rows);
  status=SetImageExtent(image,image->columns,image->rows,exception);
  if (status == MagickFalse)
    {
      /* was leaked here: destroy draw_info before bailing out */
      draw_info=DestroyDrawInfo(draw_info);
      return(DestroyImageList(image));
    }
  if (SetImageBackgroundColor(image,exception) == MagickFalse)
    {
      /* was leaked here: destroy draw_info before bailing out */
      draw_info=DestroyDrawInfo(draw_info);
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Render drawing: the primitive text is taken from the blob's in-memory
    data when available, otherwise read from the file backing the blob.
  */
  if (GetBlobStreamData(image) == (unsigned char *) NULL)
    draw_info->primitive=FileToString(image->filename,~0UL,exception);
  else
    {
      draw_info->primitive=(char *) AcquireQuantumMemory(1,(size_t)
        GetBlobSize(image)+1);
      if (draw_info->primitive != (char *) NULL)
        {
          (void) memcpy(draw_info->primitive,GetBlobStreamData(image),
            (size_t) GetBlobSize(image));
          draw_info->primitive[GetBlobSize(image)]='\0';
        }
    }
  if (draw_info->primitive == (char *) NULL)
    {
      /*
        Neither path produced a primitive (read or allocation failure);
        do not hand a NULL primitive to DrawImage().
      */
      draw_info=DestroyDrawInfo(draw_info);
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  (void) DrawImage(image,draw_info,exception);
  draw_info=DestroyDrawInfo(draw_info);
  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}
MagickExport Image *HoughLineImage(const Image *image,const size_t width,
  const size_t height,const size_t threshold,ExceptionInfo *exception)
{
#define HoughLineImageTag "HoughLine/Image"

  CacheView
    *image_view;

  char
    message[MagickPathExtent],
    path[MagickPathExtent];

  const char
    *artifact;

  double
    hough_height;

  Image
    *lines_image = NULL;

  ImageInfo
    *image_info;

  int
    file;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MatrixInfo
    *accumulator;

  PointInfo
    center;

  register ssize_t
    y;

  size_t
    accumulator_height,
    accumulator_width,
    line_count;

  /*
    Create the accumulator: 180 columns (one per degree of orientation) by
    twice the half-diagonal of the image (one row per pixel of distance from
    the image center).
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  accumulator_width=180;
  hough_height=((sqrt(2.0)*(double) (image->rows > image->columns ?
    image->rows : image->columns))/2.0);
  accumulator_height=(size_t) (2.0*hough_height);
  accumulator=AcquireMatrixInfo(accumulator_width,accumulator_height,
    sizeof(double),exception);
  if (accumulator == (MatrixInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  if (NullMatrix(accumulator) == MagickFalse)
    {
      accumulator=DestroyMatrixInfo(accumulator);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Populate the accumulator: every pixel brighter than mid-gray votes for
    each of the 180 (angle,distance) lines that pass through it.
  */
  status=MagickTrue;
  progress=0;
  center.x=(double) image->columns/2.0;
  center.y=(double) image->rows/2.0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelIntensity(image,p) > (QuantumRange/2.0))
        {
          register ssize_t
            i;

          for (i=0; i < 180; i++)
          {
            double
              count,
              radius;

            /*
              Signed distance from the image center to the line with
              orientation i through (x,y); offset by hough_height so the
              matrix row index is non-negative.
            */
            radius=(((double) x-center.x)*cos(DegreesToRadians((double) i)))+
              (((double) y-center.y)*sin(DegreesToRadians((double) i)));
            (void) GetMatrixElement(accumulator,i,(ssize_t)
              MagickRound(radius+hough_height),&count);
            count++;
            (void) SetMatrixElement(accumulator,i,(ssize_t)
              MagickRound(radius+hough_height),&count);
          }
        }
      p+=GetPixelChannels(image);
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        /*
          Was reported with CannyEdgeImageTag (copy-paste); use the tag
          defined for this method.
        */
        proceed=SetImageProgress(image,HoughLineImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    {
      accumulator=DestroyMatrixInfo(accumulator);
      return((Image *) NULL);
    }
  /*
    Generate line segments from accumulator: write an MVG "line" primitive
    to a temporary file for every accumulator cell that meets the count
    threshold and is a local maximum within the width x height neighborhood.
  */
  file=AcquireUniqueFileResource(path);
  if (file == -1)
    {
      accumulator=DestroyMatrixInfo(accumulator);
      return((Image *) NULL);
    }
  (void) FormatLocaleString(message,MagickPathExtent,
    "# Hough line transform: %.20gx%.20g%+.20g\n",(double) width,
    (double) height,(double) threshold);
  if (write(file,message,strlen(message)) != (ssize_t) strlen(message))
    status=MagickFalse;
  (void) FormatLocaleString(message,MagickPathExtent,
    "viewbox 0 0 %.20g %.20g\n",(double) image->columns,(double) image->rows);
  if (write(file,message,strlen(message)) != (ssize_t) strlen(message))
    status=MagickFalse;
  (void) FormatLocaleString(message,MagickPathExtent,
    "# x1,y1 x2,y2 # count angle distance\n");
  if (write(file,message,strlen(message)) != (ssize_t) strlen(message))
    status=MagickFalse;
  line_count=image->columns > image->rows ? image->columns/4 : image->rows/4;
  if (threshold != 0)
    line_count=threshold;
  for (y=0; y < (ssize_t) accumulator_height; y++)
  {
    register ssize_t
      x;

    for (x=0; x < (ssize_t) accumulator_width; x++)
    {
      double
        count;

      (void) GetMatrixElement(accumulator,x,y,&count);
      if (count >= (double) line_count)
        {
          double
            maxima;

          SegmentInfo
            line;

          ssize_t
            v;

          /*
            Is point a local maxima?
          */
          maxima=count;
          for (v=(-((ssize_t) height/2)); v <= (((ssize_t) height/2)); v++)
          {
            ssize_t
              u;

            for (u=(-((ssize_t) width/2)); u <= (((ssize_t) width/2)); u++)
            {
              if ((u != 0) || (v != 0))
                {
                  (void) GetMatrixElement(accumulator,x+u,y+v,&count);
                  if (count > maxima)
                    {
                      maxima=count;
                      break;
                    }
                }
            }
            /* inner loop broke early: a larger neighbor exists */
            if (u < (ssize_t) (width/2))
              break;
          }
          (void) GetMatrixElement(accumulator,x,y,&count);
          if (maxima > count)
            continue;
          if ((x >= 45) && (x <= 135))
            {
              /*
                y = (r-x cos(t))/sin(t)
              */
              line.x1=0.0;
              line.y1=((double) (y-(accumulator_height/2.0))-((line.x1-
                (image->columns/2.0))*cos(DegreesToRadians((double) x))))/
                sin(DegreesToRadians((double) x))+(image->rows/2.0);
              line.x2=(double) image->columns;
              line.y2=((double) (y-(accumulator_height/2.0))-((line.x2-
                (image->columns/2.0))*cos(DegreesToRadians((double) x))))/
                sin(DegreesToRadians((double) x))+(image->rows/2.0);
            }
          else
            {
              /*
                x = (r-y cos(t))/sin(t)
              */
              line.y1=0.0;
              line.x1=((double) (y-(accumulator_height/2.0))-((line.y1-
                (image->rows/2.0))*sin(DegreesToRadians((double) x))))/
                cos(DegreesToRadians((double) x))+(image->columns/2.0);
              line.y2=(double) image->rows;
              line.x2=((double) (y-(accumulator_height/2.0))-((line.y2-
                (image->rows/2.0))*sin(DegreesToRadians((double) x))))/
                cos(DegreesToRadians((double) x))+(image->columns/2.0);
            }
          (void) FormatLocaleString(message,MagickPathExtent,
            "line %g,%g %g,%g # %g %g %g\n",line.x1,line.y1,line.x2,line.y2,
            maxima,(double) x,(double) y);
          if (write(file,message,strlen(message)) != (ssize_t) strlen(message))
            status=MagickFalse;
        }
    }
  }
  (void) close(file);
  /*
    Render lines to image canvas.
  */
  image_info=AcquireImageInfo();
  image_info->background_color=image->background_color;
  (void) FormatLocaleString(image_info->filename,MagickPathExtent,"%s",path);
  artifact=GetImageArtifact(image,"background");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"background",artifact);
  artifact=GetImageArtifact(image,"fill");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"fill",artifact);
  artifact=GetImageArtifact(image,"stroke");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"stroke",artifact);
  artifact=GetImageArtifact(image,"strokewidth");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"strokewidth",artifact);
  lines_image=RenderHoughLines(image_info,image->columns,image->rows,exception);
  artifact=GetImageArtifact(image,"hough-lines:accumulator");
  if ((lines_image != (Image *) NULL) &&
      (IsStringTrue(artifact) != MagickFalse))
    {
      Image
        *accumulator_image;

      /* optionally append the raw accumulator as a second image */
      accumulator_image=MatrixToImage(accumulator,exception);
      if (accumulator_image != (Image *) NULL)
        AppendImageToList(&lines_image,accumulator_image);
    }
  /*
    Free resources.
  */
  accumulator=DestroyMatrixInfo(accumulator);
  image_info=DestroyImageInfo(image_info);
  (void) RelinquishUniqueFileResource(path);
  return(GetFirstImageInList(lines_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M e a n S h i f t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MeanShiftImage() delineate arbitrarily shaped clusters in the image. For
% each pixel, it visits all the pixels in the neighborhood specified by
% the window centered at the pixel and excludes those that are outside the
% radius=(window-1)/2 surrounding the pixel. From those pixels, it finds those
% that are within the specified color distance from the current mean, and
% computes a new x,y centroid from those coordinates and a new mean. This new
% x,y centroid is used as the center for a new window. This process iterates
% until it converges and the final mean replaces the (original window
% center) pixel value. It repeats this process for the next pixel, etc.,
% until it processes all pixels in the image. Results are typically better with
% colorspaces other than sRGB. We recommend YIQ, YUV or YCbCr.
%
% The format of the MeanShiftImage method is:
%
% Image *MeanShiftImage(const Image *image,const size_t width,
% const size_t height,const double color_distance,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width, height: find pixels in this neighborhood.
%
% o color_distance: the color distance.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MeanShiftImage(const Image *image,const size_t width,
  const size_t height,const double color_distance,ExceptionInfo *exception)
{
#define MaxMeanShiftIterations 100
#define MeanShiftImageTag "MeanShift/Image"

  CacheView
    *image_view,
    *mean_view,
    *pixel_view;

  Image
    *mean_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Clone the input; each output pixel receives the converged mean color, so
    the clone must be DirectClass.
  */
  mean_image=CloneImage(image,0,0,MagickTrue,exception);
  if (mean_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(mean_image,DirectClass,exception) == MagickFalse)
    {
      mean_image=DestroyImage(mean_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  pixel_view=AcquireVirtualCacheView(image,exception);
  mean_view=AcquireAuthenticCacheView(mean_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status,progress) \
    magick_number_threads(mean_image,mean_image,mean_image->rows,1)
#endif
  for (y=0; y < (ssize_t) mean_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(mean_view,0,y,mean_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) mean_image->columns; x++)
    {
      PixelInfo
        mean_pixel,
        previous_pixel;

      PointInfo
        mean_location,
        previous_location;

      register ssize_t
        i;

      /*
        Seed the mean-shift iteration with the current pixel's color and
        location.
      */
      GetPixelInfo(image,&mean_pixel);
      GetPixelInfoPixel(image,p,&mean_pixel);
      mean_location.x=(double) x;
      mean_location.y=(double) y;
      for (i=0; i < MaxMeanShiftIterations; i++)
      {
        double
          distance,
          gamma;

        PixelInfo
          sum_pixel;

        PointInfo
          sum_location;

        ssize_t
          count,
          v;

        sum_location.x=0.0;
        sum_location.y=0.0;
        GetPixelInfo(image,&sum_pixel);
        previous_location=mean_location;
        previous_pixel=mean_pixel;
        count=0;
        /*
          Accumulate window neighbors whose squared RGB distance from the
          current mean is within color_distance^2; the window is clipped to
          the ellipse v*v+u*u <= (width/2)*(height/2) around the mean.
        */
        for (v=(-((ssize_t) height/2)); v <= (((ssize_t) height/2)); v++)
        {
          ssize_t
            u;

          for (u=(-((ssize_t) width/2)); u <= (((ssize_t) width/2)); u++)
          {
            if ((v*v+u*u) <= (ssize_t) ((width/2)*(height/2)))
              {
                PixelInfo
                  pixel;

                /*
                  NOTE(review): this overwrites the shared `status` flag from
                  inside the parallel region without synchronization and can
                  mask an earlier failure with MagickTrue -- confirm intended.
                */
                status=GetOneCacheViewVirtualPixelInfo(pixel_view,(ssize_t)
                  MagickRound(mean_location.x+u),(ssize_t) MagickRound(
                  mean_location.y+v),&pixel,exception);
                distance=(mean_pixel.red-pixel.red)*(mean_pixel.red-pixel.red)+
                  (mean_pixel.green-pixel.green)*(mean_pixel.green-pixel.green)+
                  (mean_pixel.blue-pixel.blue)*(mean_pixel.blue-pixel.blue);
                if (distance <= (color_distance*color_distance))
                  {
                    sum_location.x+=mean_location.x+u;
                    sum_location.y+=mean_location.y+v;
                    sum_pixel.red+=pixel.red;
                    sum_pixel.green+=pixel.green;
                    sum_pixel.blue+=pixel.blue;
                    sum_pixel.alpha+=pixel.alpha;
                    count++;
                  }
              }
          }
        }
        /*
          New mean: centroid and average color of the accepted neighbors.
          PerceptibleReciprocal() guards against count == 0.
        */
        gamma=PerceptibleReciprocal(count);
        mean_location.x=gamma*sum_location.x;
        mean_location.y=gamma*sum_location.y;
        mean_pixel.red=gamma*sum_pixel.red;
        mean_pixel.green=gamma*sum_pixel.green;
        mean_pixel.blue=gamma*sum_pixel.blue;
        mean_pixel.alpha=gamma*sum_pixel.alpha;
        /*
          Converged when the squared spatial shift plus the squared color
          shift (scaled to a 0..255 range) drops to 3.0 or less.
        */
        distance=(mean_location.x-previous_location.x)*
          (mean_location.x-previous_location.x)+
          (mean_location.y-previous_location.y)*
          (mean_location.y-previous_location.y)+
          255.0*QuantumScale*(mean_pixel.red-previous_pixel.red)*
          255.0*QuantumScale*(mean_pixel.red-previous_pixel.red)+
          255.0*QuantumScale*(mean_pixel.green-previous_pixel.green)*
          255.0*QuantumScale*(mean_pixel.green-previous_pixel.green)+
          255.0*QuantumScale*(mean_pixel.blue-previous_pixel.blue)*
          255.0*QuantumScale*(mean_pixel.blue-previous_pixel.blue);
        if (distance <= 3.0)
          break;
      }
      /* write the converged mean into the output pixel */
      SetPixelRed(mean_image,ClampToQuantum(mean_pixel.red),q);
      SetPixelGreen(mean_image,ClampToQuantum(mean_pixel.green),q);
      SetPixelBlue(mean_image,ClampToQuantum(mean_pixel.blue),q);
      SetPixelAlpha(mean_image,ClampToQuantum(mean_pixel.alpha),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(mean_image);
    }
    if (SyncCacheViewAuthenticPixels(mean_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,MeanShiftImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  mean_view=DestroyCacheView(mean_view);
  pixel_view=DestroyCacheView(pixel_view);
  image_view=DestroyCacheView(image_view);
  return(mean_image);
}
|
GB_unaryop__lnot_int8_bool.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_int8_bool
// op(A') function: GB_tran__lnot_int8_bool
// C type: int8_t
// A type: bool
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = !(aij != 0)

// entry type of the input matrix A
#define GB_ATYPE \
bool

// entry type of the output matrix C
#define GB_CTYPE \
int8_t

// declare a local scalar aij and load it from Ax [pA] (no typecast here)
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]

// the (p)-th entry of the output array Cx
#define GB_CX(p) Cx [p]

// unary operator: logical NOT, z = !(x != 0)
#define GB_OP(z, x) \
z = !(x != 0) ;

// casting: declare z and typecast aij from bool to the C type (int8_t)
#define GB_CASTING(z, aij) \
int8_t z = (int8_t) aij ;

// cij = op (cast (aij)): load one entry of A, typecast it, apply the
// operator, and store the result into C
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
// (GxB_NO_* are compile-time controls from GB_control.h)
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_INT8 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cij = !(aij != 0) to each of the anz entries of Ax (bool), storing
// the int8_t results in Cx, in parallel with OpenMP.  Returns GrB_NO_VALUE
// when this operator is disabled at compile time (see GB_DISABLE), so the
// caller can fall back to the generic (non-hard-coded) kernel.
GrB_Info GB_unop__lnot_int8_bool
(
int8_t *Cx, // Cx and Ax may be aliased
bool *Ax,
int64_t anz, // number of entries to process
int nthreads // number of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// Cx [p] = (int8_t) !(Ax [p] != 0)
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual kernel is the textual #include of GB_unaryop_transpose.c below,
// specialized by the GB_* macros defined earlier in this file.  Returns
// GrB_NO_VALUE when this operator is disabled at compile time (GB_DISABLE).
GrB_Info GB_tran__lnot_int8_bool
(
GrB_Matrix C,
const GrB_Matrix A,
// NOTE(review): Rowcounts appears to be per-slice workspace consumed by
// GB_unaryop_transpose.c — confirm against that kernel's definition.
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice // number of slices of A for parallelism
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// phase 2 of 2: fill C (phase 1, counting, is done by the caller)
// NOTE(review): phase split inferred from the macro name — verify in
// GB_unaryop_transpose.c.
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.